NEON shuffle (s4) GEMM and IGEMM micro-kernels.

M1 is 7.1% faster on mobilenet_v2
M2 is 6.5% faster on mobilenet_v2
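
For context, the s4 ("shuffle") kernels added here load four consecutive A elements per row into one vector, run four multiply-accumulate passes against pre-shuffled 8-wide weight blocks, and rotate the A lanes with vextq_f32 between passes, so the inner loop needs one A load per four K steps instead of one broadcast per step. The sketch below is a simplified, single-row illustration of that main loop; it is not part of this change, the function name and weight-packing assumption are illustrative only, and the K-remainder and output-clamping logic of the real kernels is omitted:

    #include <stddef.h>

    #include <arm_neon.h>

    // Single-row (1x8) shuffle-GEMM main loop, condensed from the kernels in
    // this change. Assumes kc is a multiple of 4 * sizeof(float) and that the
    // packed weights w hold the bias followed by four pre-rotated 8-wide
    // column blocks per group of 4 K values, as the s4 kernels expect.
    void sgemm_1x8s4_sketch(size_t kc, const float* a, const float* w, float* c)
    {
      // Accumulators are initialized from the packed bias.
      float32x4_t vacc0123 = vld1q_f32(w); w += 4;
      float32x4_t vacc4567 = vld1q_f32(w); w += 4;

      for (size_t k = kc; k != 0; k -= 4 * sizeof(float)) {
        // Load 4 consecutive A values once...
        float32x4_t va = vld1q_f32(a); a += 4;

        // ...and reuse them in 4 multiply-accumulate passes. After each pass
        // the lanes of va are rotated by one element
        // ({a0,a1,a2,a3} -> {a1,a2,a3,a0}) instead of reloading or
        // broadcasting scalars; the matching rotation is baked into the
        // packed weights. The rotation after the fourth pass is redundant;
        // the unrolled kernels below simply omit it.
        for (int pass = 0; pass < 4; pass++) {
          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
          vacc0123 = vmlaq_f32(vacc0123, va, vb0123);
          vacc4567 = vmlaq_f32(vacc4567, va, vb4567);
          va = vextq_f32(va, va, 1);
        }
      }

      // Store one 1x8 tile of C (min/max clamping omitted in this sketch).
      vst1q_f32(c, vacc0123);
      vst1q_f32(c + 4, vacc4567);
    }

The NEONFMA variants in this change are identical to the NEON ones except that vmlaq_f32 is replaced with the fused vfmaq_f32.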

PiperOrigin-RevId: 281623279
diff --git a/src/f32-gemm/1x8s4-neon.c b/src/f32-gemm/1x8s4-neon.c
new file mode 100644
index 0000000..6986389
--- /dev/null
+++ b/src/f32-gemm/1x8s4-neon.c
@@ -0,0 +1,133 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_1x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/1x8s4-neonfma.c b/src/f32-gemm/1x8s4-neonfma.c
new file mode 100644
index 0000000..e993dd6
--- /dev/null
+++ b/src/f32-gemm/1x8s4-neonfma.c
@@ -0,0 +1,133 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_1x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/4x8s4-neon.c b/src/f32-gemm/4x8s4-neon.c
new file mode 100644
index 0000000..7783666
--- /dev/null
+++ b/src/f32-gemm/4x8s4-neon.c
@@ -0,0 +1,244 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_4x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/4x8s4-neonfma.c b/src/f32-gemm/4x8s4-neonfma.c
new file mode 100644
index 0000000..a5e37c1
--- /dev/null
+++ b/src/f32-gemm/4x8s4-neonfma.c
@@ -0,0 +1,244 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_4x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/6x8s4-neon.c b/src/f32-gemm/6x8s4-neon.c
new file mode 100644
index 0000000..706d1f6
--- /dev/null
+++ b/src/f32-gemm/6x8s4-neon.c
@@ -0,0 +1,318 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_6x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/6x8s4-neonfma.c b/src/f32-gemm/6x8s4-neonfma.c
new file mode 100644
index 0000000..9a1e7c5
--- /dev/null
+++ b/src/f32-gemm/6x8s4-neonfma.c
@@ -0,0 +1,318 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_6x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/8x8s4-neon.c b/src/f32-gemm/8x8s4-neon.c
new file mode 100644
index 0000000..fe3b8a7
--- /dev/null
+++ b/src/f32-gemm/8x8s4-neon.c
@@ -0,0 +1,392 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_8x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
+  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    a6 = a5;
+    c6 = c5;
+  }
+  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
+  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    a7 = a6;
+    c7 = c6;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+    float32x4_t vacc6x0123 = vacc0x0123;
+    float32x4_t vacc6x4567 = vacc0x4567;
+    float32x4_t vacc7x0123 = vacc0x0123;
+    float32x4_t vacc7x4567 = vacc0x4567;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+      float32x4_t va6 = vld1q_f32(a6); a6 += 4;
+      float32x4_t va7 = vld1q_f32(a7); a7 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c0);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c0);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c1);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c1);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c2);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c2);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c3);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c3);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+        const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
+        const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
+        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123);
+        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
+        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567);
+        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
+    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
+    vacc7x4567 = vminq_f32(vacc7x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
+    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
+    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c7, vacc7x0123);
+      vst1q_f32(c7 + 4, vacc7x4567);
+      c7 = (float*) ((uintptr_t) c7 + cn_stride);
+      vst1q_f32(c6, vacc6x0123);
+      vst1q_f32(c6 + 4, vacc6x4567);
+      c6 = (float*) ((uintptr_t) c6 + cn_stride);
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a7 = (const float*) ((uintptr_t) a7 - kc);
+      a6 = (const float*) ((uintptr_t) a6 - kc);
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c7, vacc7x0123); c7 += 4;
+        vst1q_f32(c6, vacc6x0123); c6 += 4;
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc7x0123 = vacc7x4567;
+        vacc6x0123 = vacc6x4567;
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
+      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c7, vacc7x01); c7 += 2;
+        vst1_f32(c6, vacc6x01); c6 += 2;
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc7x01 = vget_high_f32(vacc7x0123);
+        vacc6x01 = vget_high_f32(vacc6x0123);
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c7, vacc7x01, 0);
+        vst1_lane_f32(c6, vacc6x01, 0);
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/8x8s4-neonfma.c b/src/f32-gemm/8x8s4-neonfma.c
new file mode 100644
index 0000000..c2aeda4
--- /dev/null
+++ b/src/f32-gemm/8x8s4-neonfma.c
@@ -0,0 +1,392 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_8x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
+  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    a6 = a5;
+    c6 = c5;
+  }
+  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
+  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    a7 = a6;
+    c7 = c6;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+    float32x4_t vacc6x0123 = vacc0x0123;
+    float32x4_t vacc6x4567 = vacc0x4567;
+    float32x4_t vacc7x0123 = vacc0x0123;
+    float32x4_t vacc7x4567 = vacc0x4567;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+      float32x4_t va6 = vld1q_f32(a6); a6 += 4;
+      float32x4_t va7 = vld1q_f32(a7); a7 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c0);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c0);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c0);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c1);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c1);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c1);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c2);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c2);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c2);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c3);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c3);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c3);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+        const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
+        const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
+        vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123);
+        vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
+        vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567);
+        vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
+    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
+    vacc7x4567 = vminq_f32(vacc7x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
+    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
+    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c7, vacc7x0123);
+      vst1q_f32(c7 + 4, vacc7x4567);
+      c7 = (float*) ((uintptr_t) c7 + cn_stride);
+      vst1q_f32(c6, vacc6x0123);
+      vst1q_f32(c6 + 4, vacc6x4567);
+      c6 = (float*) ((uintptr_t) c6 + cn_stride);
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a7 = (const float*) ((uintptr_t) a7 - kc);
+      a6 = (const float*) ((uintptr_t) a6 - kc);
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c7, vacc7x0123); c7 += 4;
+        vst1q_f32(c6, vacc6x0123); c6 += 4;
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc7x0123 = vacc7x4567;
+        vacc6x0123 = vacc6x4567;
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
+      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c7, vacc7x01); c7 += 2;
+        vst1_f32(c6, vacc6x01); c6 += 2;
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc7x01 = vget_high_f32(vacc7x0123);
+        vacc6x01 = vget_high_f32(vacc6x0123);
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c7, vacc7x01, 0);
+        vst1_lane_f32(c6, vacc6x01, 0);
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/neon-shuffle.c.in b/src/f32-gemm/neon-shuffle.c.in
new file mode 100644
index 0000000..b9481ca
--- /dev/null
+++ b/src/f32-gemm/neon-shuffle.c.in
@@ -0,0 +1,161 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
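+// Shuffled ("s4") GEMM/GEMMINC microkernel template: computes an MR x NR tile of C,
+// consuming 4 elements of K per main-loop iteration. FMA selects vfmaq_f32 instead of
+// vmlaq_f32, and INC variants start from an externally supplied accumulator buffer
+// instead of the bias values at the start of w.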
+$assert NR % 4 == 0
+$IDLETTERS = "0123456789ABCDEFGHIJKLMN"
+$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm${"inc" if INC else ""}_ukernel_${MR}x${NR}s4__${"neonfma" if FMA else "neon"}(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    $if INC:
+      const float* restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  $if INC:
+    assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  $for M in range(1, MR):
+    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
+    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+
+  do {
+    $if INC:
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          float32x4_t vacc${M}x${IDLETTERS[N:N+4]} = vld1q_f32(acc); acc += 4;
+    $else:
+      $for N in range(0, NR, 4):
+        float32x4_t vacc0x${IDLETTERS[N:N+4]} = vld1q_f32(w); w += 4;
+      $for M in range(1, MR):
+        $for N in range(0, NR, 4):
+          float32x4_t vacc${M}x${IDLETTERS[N:N+4]} = vacc0x${IDLETTERS[N:N+4]};
+
+    size_t k = kc;
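+    // Main loop: consume 4 K elements per row per iteration. Between sub-steps the A
+    // vectors are rotated by one lane (vextq_f32); w is expected to be pre-packed in the
+    // matching shuffled order, so plain vector multiply-adds accumulate the correct
+    // products without per-lane broadcasts.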
+    while (k >= 4 * sizeof(float)) {
+      $for M in range(MR):
+        float32x4_t va${M} = vld1q_f32(a${M}); a${M} += 4;
+
+      $for L in range(4):
+
+        $for N in range(0, NR, 4):
+          const float32x4_t vb${IDLETTERS[N:N+4]}c${L} = vld1q_f32(w + ${L * NR + N});
+
+        $for N in range(0, NR, 4):
+          $for M in range(MR):
+            vacc${M}x${IDLETTERS[N:N+4]} = ${VMULADDQ_F32}(vacc${M}x${IDLETTERS[N:N+4]}, va${M}, vb${IDLETTERS[N:N+4]}c${L});
+
+        $if L + 1 != 4:
+          $for M in range(MR):
+            va${M} = vextq_f32(va${M}, va${M}, 1);
+
+      w += ${4 * NR};
+      k -= 4 * sizeof(float);
+    }
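+    // Remainder loop: process the last (kc mod 4) K elements one at a time, broadcasting
+    // each A element across a full vector.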
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        $for M in range(MR):
+          const float32x4_t va${M} = vld1q_dup_f32(a${M}); a${M} += 1;
+
+        $for N in range(0, NR, 4):
+          const float32x4_t vb${IDLETTERS[N:N+4]} = vld1q_f32(w); w += 4;
+
+        $for N in range(0, NR, 4):
+          $for M in range(MR):
+            vacc${M}x${IDLETTERS[N:N+4]} = ${VMULADDQ_F32}(vacc${M}x${IDLETTERS[N:N+4]}, va${M}, vb${IDLETTERS[N:N+4]});
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
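+    // Clamp the accumulators to the [min, max] output range from params.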
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    $for N in range(0, NR, 4):
+      $for M in range(MR):
+        vacc${M}x${IDLETTERS[N:N+4]} = vminq_f32(vacc${M}x${IDLETTERS[N:N+4]}, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    $for N in range(0, NR, 4):
+      $for M in range(MR):
+        vacc${M}x${IDLETTERS[N:N+4]} = vmaxq_f32(vacc${M}x${IDLETTERS[N:N+4]}, vmin);
+
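+    // Store the MR x NR tile; when fewer than NR columns remain, write them out in
+    // power-of-two chunks.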
+    if XNN_LIKELY(nc >= ${NR}) {
+      $for M in reversed(range(MR)):
+        vst1q_f32(c${M}, vacc${M}x${IDLETTERS[0:4]});
+        $for N in range(4, NR, 4):
+          vst1q_f32(c${M} + ${N}, vacc${M}x${IDLETTERS[N:N+4]});
+        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
+
+      $for M in reversed(range(MR)):
+        a${M} = (const float*) ((uintptr_t) a${M} - kc);
+
+      nc -= ${NR};
+
+    } else {
+      $for LOG2N in reversed(range(NR.bit_length())):
+        $if NR != 1 << LOG2N:
+          if (nc & ${1 << LOG2N}) {
+            $if LOG2N >= 2:
+              $for N in range(0, 1 << LOG2N, 4):
+                $for M in reversed(range(MR)):
+                  vst1q_f32(c${M}, vacc${M}x${IDLETTERS[N:N+4]}); c${M} += 4;
+
+              $for M in reversed(range(MR)):
+                $for N in range(0, 1 << (LOG2N - 1), 4):
+                  vacc${M}x${IDLETTERS[N:N+4]} = vacc${M}x${IDLETTERS[N + (1 << LOG2N):N + (1 << LOG2N) + 4]};
+            $elif LOG2N == 1:
+              $for M in reversed(range(MR)):
+                vst1_f32(c${M}, vacc${M}x${IDLETTERS[0:2]}); c${M} += 2;
+
+              $for M in reversed(range(MR)):
+                vacc${M}x${IDLETTERS[0:2]} = vget_high_f32(vacc${M}x${IDLETTERS[0:4]});
+            $elif LOG2N == 0:
+              $for M in reversed(range(MR)):
+                vst1_lane_f32(c${M}, vacc${M}x${IDLETTERS[0:2]}, 0);
+          }
+          $if LOG2N == 2:
+            $for M in reversed(range(MR)):
+              float32x2_t vacc${M}x${IDLETTERS[0:2]} = vget_low_f32(vacc${M}x${IDLETTERS[0:4]});
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/1x8s4-neon.c b/src/f32-gemminc/1x8s4-neon.c
new file mode 100644
index 0000000..4a6f670
--- /dev/null
+++ b/src/f32-gemminc/1x8s4-neon.c
@@ -0,0 +1,135 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_1x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float* restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/1x8s4-neonfma.c b/src/f32-gemminc/1x8s4-neonfma.c
new file mode 100644
index 0000000..c7dbc9f
--- /dev/null
+++ b/src/f32-gemminc/1x8s4-neonfma.c
@@ -0,0 +1,135 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_1x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float* restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/4x8s4-neon.c b/src/f32-gemminc/4x8s4-neon.c
new file mode 100644
index 0000000..80dfb29
--- /dev/null
+++ b/src/f32-gemminc/4x8s4-neon.c
@@ -0,0 +1,246 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_4x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float* restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/4x8s4-neonfma.c b/src/f32-gemminc/4x8s4-neonfma.c
new file mode 100644
index 0000000..2356442
--- /dev/null
+++ b/src/f32-gemminc/4x8s4-neonfma.c
@@ -0,0 +1,246 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_4x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float* restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/6x8s4-neon.c b/src/f32-gemminc/6x8s4-neon.c
new file mode 100644
index 0000000..6340245
--- /dev/null
+++ b/src/f32-gemminc/6x8s4-neon.c
@@ -0,0 +1,320 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_6x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float* restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/6x8s4-neonfma.c b/src/f32-gemminc/6x8s4-neonfma.c
new file mode 100644
index 0000000..ccc6a38
--- /dev/null
+++ b/src/f32-gemminc/6x8s4-neonfma.c
@@ -0,0 +1,320 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
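+// Same 6x8 s4-shuffle GEMMINC scheme as the NEON kernel above, but the
+// multiply-accumulates use vfmaq_f32 (fused multiply-add) instead of
+// vmlaq_f32, for cores whose NEON unit provides FMA.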
+void xnn_f32_gemminc_ukernel_6x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/8x8s4-neon.c b/src/f32-gemminc/8x8s4-neon.c
new file mode 100644
index 0000000..77c6228
--- /dev/null
+++ b/src/f32-gemminc/8x8s4-neon.c
@@ -0,0 +1,394 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
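+// 8-row variant of the s4-shuffle GEMMINC kernel: each pair of weight loads is
+// reused across 8 rows of A, at the cost of keeping 16 accumulator registers
+// plus 8 A registers live.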
+void xnn_f32_gemminc_ukernel_8x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
+  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    a6 = a5;
+    c6 = c5;
+  }
+  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
+  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    a7 = a6;
+    c7 = c6;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc6x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc6x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc7x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc7x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+      float32x4_t va6 = vld1q_f32(a6); a6 += 4;
+      float32x4_t va7 = vld1q_f32(a7); a7 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c0);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c0);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c1);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c1);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c2);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c2);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c3);
+      vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c3);
+      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
+      vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c3);
+      vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+        const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
+        const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
+        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123);
+        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
+        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567);
+        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
+    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
+    vacc7x4567 = vminq_f32(vacc7x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
+    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
+    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c7, vacc7x0123);
+      vst1q_f32(c7 + 4, vacc7x4567);
+      c7 = (float*) ((uintptr_t) c7 + cn_stride);
+      vst1q_f32(c6, vacc6x0123);
+      vst1q_f32(c6 + 4, vacc6x4567);
+      c6 = (float*) ((uintptr_t) c6 + cn_stride);
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a7 = (const float*) ((uintptr_t) a7 - kc);
+      a6 = (const float*) ((uintptr_t) a6 - kc);
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c7, vacc7x0123); c7 += 4;
+        vst1q_f32(c6, vacc6x0123); c6 += 4;
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc7x0123 = vacc7x4567;
+        vacc6x0123 = vacc6x4567;
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
+      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c7, vacc7x01); c7 += 2;
+        vst1_f32(c6, vacc6x01); c6 += 2;
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc7x01 = vget_high_f32(vacc7x0123);
+        vacc6x01 = vget_high_f32(vacc6x0123);
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c7, vacc7x01, 0);
+        vst1_lane_f32(c6, vacc6x01, 0);
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemminc/8x8s4-neonfma.c b/src/f32-gemminc/8x8s4-neonfma.c
new file mode 100644
index 0000000..0b169f0
--- /dev/null
+++ b/src/f32-gemminc/8x8s4-neonfma.c
@@ -0,0 +1,394 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+
+
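+// FMA counterpart of the 8x8 s4-shuffle GEMMINC kernel above (vfmaq_f32 in
+// place of vmlaq_f32); otherwise identical in structure.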
+void xnn_f32_gemminc_ukernel_8x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float* restrict a,
+    size_t a_stride,
+    const float* restrict w,
+    float* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
+  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    a6 = a5;
+    c6 = c5;
+  }
+  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
+  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    a7 = a6;
+    c7 = c6;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc6x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc6x4567 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc7x0123 = vld1q_f32(acc); acc += 4;
+    float32x4_t vacc7x4567 = vld1q_f32(acc); acc += 4;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+      float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+      float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+      float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+      float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+      float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+      float32x4_t va6 = vld1q_f32(a6); a6 += 4;
+      float32x4_t va7 = vld1q_f32(a7); a7 += 4;
+
+
+      const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+      const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c0);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c0);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c0);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c0);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c0);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c0);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c0);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c0);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+      const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c1);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c1);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c1);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c1);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c1);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c1);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c1);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c1);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+      const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c2);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c2);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c2);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c2);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c2);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c2);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c2);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c2);
+
+      va0 = vextq_f32(va0, va0, 1);
+      va1 = vextq_f32(va1, va1, 1);
+      va2 = vextq_f32(va2, va2, 1);
+      va3 = vextq_f32(va3, va3, 1);
+      va4 = vextq_f32(va4, va4, 1);
+      va5 = vextq_f32(va5, va5, 1);
+      va6 = vextq_f32(va6, va6, 1);
+      va7 = vextq_f32(va7, va7, 1);
+
+      const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+      const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+      vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+      vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+      vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+      vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+      vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c3);
+      vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c3);
+      vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c3);
+      vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c3);
+      vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+      vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+      vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+      vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+      vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c3);
+      vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c3);
+      vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c3);
+      vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c3);
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+        const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+        const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+        const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+        const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+        const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+        const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
+        const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;
+
+        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+        const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
+        vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123);
+        vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
+        vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567);
+        vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567);
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
+    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
+    vacc7x4567 = vminq_f32(vacc7x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
+    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
+    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c7, vacc7x0123);
+      vst1q_f32(c7 + 4, vacc7x4567);
+      c7 = (float*) ((uintptr_t) c7 + cn_stride);
+      vst1q_f32(c6, vacc6x0123);
+      vst1q_f32(c6 + 4, vacc6x4567);
+      c6 = (float*) ((uintptr_t) c6 + cn_stride);
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a7 = (const float*) ((uintptr_t) a7 - kc);
+      a6 = (const float*) ((uintptr_t) a6 - kc);
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c7, vacc7x0123); c7 += 4;
+        vst1q_f32(c6, vacc6x0123); c6 += 4;
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc7x0123 = vacc7x4567;
+        vacc6x0123 = vacc6x4567;
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
+      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c7, vacc7x01); c7 += 2;
+        vst1_f32(c6, vacc6x01); c6 += 2;
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc7x01 = vget_high_f32(vacc7x0123);
+        vacc6x01 = vget_high_f32(vacc6x0123);
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c7, vacc7x01, 0);
+        vst1_lane_f32(c6, vacc6x01, 0);
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/1x8s4-neon.c b/src/f32-igemm/1x8s4-neon.c
new file mode 100644
index 0000000..12304dd
--- /dev/null
+++ b/src/f32-igemm/1x8s4-neon.c
@@ -0,0 +1,148 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
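+// Indirect GEMM (IGEMM) variant of the 1x8 s4-shuffle kernel: instead of a
+// dense A matrix, `a` is an array of `ks` row pointers (typically the
+// convolution indirection buffer). Pointers equal to `zero` are used as-is so
+// padding rows read from the shared zero buffer; all other pointers are
+// shifted by `a_offset`. The inner K loop uses the same 4-element vextq_f32
+// rotation as the corresponding GEMM kernel.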
+void xnn_f32_igemm_ukernel_1x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+          vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
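
For orientation only (not part of the patch): the IGEMM variants take A through an indirection buffer. `a` holds ks / sizeof(void*) groups of mr row pointers; entries equal to `zero` address a shared zero vector (implicit padding) and are deliberately left unshifted, every other pointer is moved by a_offset, and after each NC tile the pointer array is rewound by ks bytes. A minimal scalar model of that pointer walk for the mr = 1 case follows; the buffer setup in it is a hypothetical example, not XNNPACK's indirection-buffer code.

/* Scalar model of the IGEMM indirection walk (mr = 1, one output column).
 * Buffer contents are hypothetical. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static float igemm_1x1_scalar(
    const float** a, size_t ks, size_t kc_bytes,
    const float* w, size_t a_offset, const float* zero)
{
  const size_t kc = kc_bytes / sizeof(float);
  float acc = w[0];  /* bias, read once per tile as in the kernels above */
  w += 1;
  size_t p = ks;
  do {
    const float* a0 = a[0];
    assert(a0 != NULL);
    if (a0 != zero) {  /* padding rows stay pointed at the zero vector */
      a0 = (const float*) ((uintptr_t) a0 + a_offset);
    }
    a += 1;
    for (size_t k = 0; k < kc; k++) {
      acc += a0[k] * w[k];
    }
    w += kc;
    p -= sizeof(void*);
  } while (p != 0);
  return acc;
}

int main(void) {
  static const float zero[4] = {0.0f};  /* shared padding row */
  static const float image[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  static const float weights[1 + 8] = {0.5f,  /* bias */
                                       1, 1, 1, 1, 1, 1, 1, 1};
  /* Two taps: a real row (offset applied) and a padding row (left as zero). */
  const float* a[2] = {image, zero};
  const float out = igemm_1x1_scalar(a, 2 * sizeof(void*), 4 * sizeof(float),
                                     weights, 2 * sizeof(float), zero);
  printf("%g\n", out);  /* 0.5 + (3 + 4 + 5 + 6) + 0 = 18.5 */
  return 0;
}
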
diff --git a/src/f32-igemm/1x8s4-neonfma.c b/src/f32-igemm/1x8s4-neonfma.c
new file mode 100644
index 0000000..b63f65e
--- /dev/null
+++ b/src/f32-igemm/1x8s4-neonfma.c
@@ -0,0 +1,148 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_1x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+          vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
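
For orientation only (not part of the patch): the -neon and -neonfma variants differ only in the multiply-accumulate intrinsic. vmlaq_f32 performs a separate multiply and add (two roundings), while vfmaq_f32 is a fused multiply-add with a single rounding, available on NEONv2/AArch64. The scalar sketch below shows the kind of last-bit difference this can produce; compile with contraction disabled (e.g. -ffp-contract=off) so the unfused expression is not silently fused.

/* Scalar analogue of vmlaq_f32 (unfused) vs. vfmaq_f32 (fused). */
#include <math.h>
#include <stdio.h>

int main(void) {
  const float a = 1.0f + 0x1.0p-12f;    /* 1 + 2^-12 */
  const float b = a;
  const float c = -1.0f;
  const float unfused = a * b + c;      /* product rounded before the add */
  const float fused = fmaf(a, b, c);    /* rounded once, like vfmaq_f32 */
  printf("unfused = %.9g\n", unfused);  /* 0.00048828125   (2^-11)         */
  printf("fused   = %.9g\n", fused);    /* 0.000488340855  (2^-11 + 2^-24) */
  return 0;
}
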
diff --git a/src/f32-igemm/4x8s4-neon.c b/src/f32-igemm/4x8s4-neon.c
new file mode 100644
index 0000000..a56bab6
--- /dev/null
+++ b/src/f32-igemm/4x8s4-neon.c
@@ -0,0 +1,265 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_4x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+          vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+          vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+          vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+          vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+          vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+          vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+          vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
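
For orientation only (not part of the patch): when mr is below the kernel's row tile, the extra output-row pointers are clamped onto the previous valid row (c3 = c2 above, and so on). Because the stores run from the highest row index down to c0, the aliased rows are written first and then overwritten by the last valid row, so no out-of-bounds memory is touched. A minimal sketch of that pattern, with hypothetical buffers and 2 columns per row to keep it small:

/* Why clamping out-of-range row pointers is safe: mr = 3 in a 4-row kernel. */
#include <stddef.h>
#include <stdio.h>

int main(void) {
  float out[3][2] = {{0.0f}};               /* only 3 real rows exist */
  const size_t cm_stride = sizeof(out[0]);
  const size_t mr = 3;
  float* c0 = &out[0][0];
  float* c1 = (float*) ((char*) c0 + cm_stride);
  float* c2 = (float*) ((char*) c1 + cm_stride);
  float* c3 = (float*) ((char*) c2 + cm_stride);
  if (mr != 4) {
    c3 = c2;                                /* same clamp as in the kernel */
  }
  /* Row 3's values are junk in this sketch, but its store lands on row 2's
   * memory and is immediately overwritten because stores go from c3 to c0. */
  const float row[4][2] = {{1, 1}, {2, 2}, {3, 3}, {-9, -9}};
  c3[0] = row[3][0]; c3[1] = row[3][1];
  c2[0] = row[2][0]; c2[1] = row[2][1];
  c1[0] = row[1][0]; c1[1] = row[1][1];
  c0[0] = row[0][0]; c0[1] = row[0][1];
  printf("%g %g %g\n", out[0][0], out[1][0], out[2][0]);  /* 1 2 3 */
  return 0;
}
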
diff --git a/src/f32-igemm/4x8s4-neonfma.c b/src/f32-igemm/4x8s4-neonfma.c
new file mode 100644
index 0000000..bdc8f42
--- /dev/null
+++ b/src/f32-igemm/4x8s4-neonfma.c
@@ -0,0 +1,265 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_4x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+          vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+          vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+          vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+          vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+          vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+          vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+          vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/6x8s4-neon.c b/src/f32-igemm/6x8s4-neon.c
new file mode 100644
index 0000000..881eb88
--- /dev/null
+++ b/src/f32-igemm/6x8s4-neon.c
@@ -0,0 +1,343 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_6x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+        float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+        float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+          const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+          const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+          vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+          vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+          vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+          vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
+          vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
+          vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+          vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+          vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+          vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+          vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
+          vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
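
For orientation only (not part of the patch): the output tail (nc < 8) is handled by the nc & 4 / nc & 2 / nc & 1 cascade above, which shifts the surviving accumulator lanes down after each partial store. A scalar sketch of one row with an nc = 5 tail, using hypothetical values:

/* Scalar model of the nc & 4 / & 2 / & 1 tail store cascade (one row). */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  float acc[8] = {10, 11, 12, 13, 14, 15, 16, 17};  /* clamped accumulators */
  float out[5];                                      /* only 5 columns left */
  float* c0 = out;
  size_t nc = 5;
  if (nc & 4) {
    memcpy(c0, acc, 4 * sizeof(float)); c0 += 4;     /* like vst1q_f32      */
    memcpy(acc, acc + 4, 4 * sizeof(float));         /* vacc0123 = vacc4567 */
  }
  if (nc & 2) {
    memcpy(c0, acc, 2 * sizeof(float)); c0 += 2;     /* like vst1_f32       */
    memcpy(acc, acc + 2, 2 * sizeof(float));         /* like vget_high_f32  */
  }
  if (nc & 1) {
    *c0 = acc[0];                                    /* like vst1_lane_f32  */
  }
  printf("%g %g %g %g %g\n", out[0], out[1], out[2], out[3], out[4]);
  /* prints: 10 11 12 13 14 */
  return 0;
}
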
diff --git a/src/f32-igemm/6x8s4-neonfma.c b/src/f32-igemm/6x8s4-neonfma.c
new file mode 100644
index 0000000..61d7af1
--- /dev/null
+++ b/src/f32-igemm/6x8s4-neonfma.c
@@ -0,0 +1,343 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_6x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+        float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+        float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c0);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c0);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c0);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c1);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c1);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c1);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c2);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c2);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c2);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c3);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c3);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c3);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+          const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+          const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+          vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+          vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+          vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+          vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
+          vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
+          vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+          vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+          vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+          vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+          vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
+          vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/8x8s4-neon.c b/src/f32-igemm/8x8s4-neon.c
new file mode 100644
index 0000000..2a4439c
--- /dev/null
+++ b/src/f32-igemm/8x8s4-neon.c
@@ -0,0 +1,421 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_8x8s4__neon(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (8 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    c5 = c4;
+  }
+  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    c6 = c5;
+  }
+  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    c7 = c6;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+    float32x4_t vacc6x0123 = vacc0x0123;
+    float32x4_t vacc6x4567 = vacc0x4567;
+    float32x4_t vacc7x0123 = vacc0x0123;
+    float32x4_t vacc7x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      const float* restrict a6 = a[6];
+      assert(a6 != NULL);
+      if XNN_UNPREDICTABLE(a6 != zero) {
+        a6 = (const float*) ((uintptr_t) a6 + a_offset);
+      }
+      const float* restrict a7 = a[7];
+      assert(a7 != NULL);
+      if XNN_UNPREDICTABLE(a7 != zero) {
+        a7 = (const float*) ((uintptr_t) a7 + a_offset);
+      }
+      a += 8;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+        float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+        float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+        float32x4_t va6 = vld1q_f32(a6); a6 += 4;
+        float32x4_t va7 = vld1q_f32(a7); a7 += 4;
+
+
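+        // One pass per shuffle position c0..c3: multiply the current va vectors against
+        // 8 pre-shuffled weights, then rotate each va by one lane with vextq_f32 so the
+        // next pass lines up with the next weight group.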
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
+        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c0);
+        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
+        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c0);
+        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+        va6 = vextq_f32(va6, va6, 1);
+        va7 = vextq_f32(va7, va7, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
+        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c1);
+        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
+        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c1);
+        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+        va6 = vextq_f32(va6, va6, 1);
+        va7 = vextq_f32(va7, va7, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
+        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c2);
+        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
+        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c2);
+        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+        va6 = vextq_f32(va6, va6, 1);
+        va7 = vextq_f32(va7, va7, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
+        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
+        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
+        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
+        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
+        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c3);
+        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c3);
+        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
+        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
+        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
+        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
+        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
+        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
+        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c3);
+        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
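+      // Remainder loop for kc not a multiple of 4: broadcast one k element per row with
+      // vld1q_dup_f32 and accumulate it against the next 8 weights.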
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+          const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+          const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+          const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
+          const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
+          vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
+          vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
+          vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
+          vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
+          vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
+          vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123);
+          vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123);
+          vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
+          vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
+          vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
+          vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
+          vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
+          vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
+          vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567);
+          vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 8 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
+    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
+    vacc7x4567 = vminq_f32(vacc7x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
+    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
+    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c7, vacc7x0123);
+      vst1q_f32(c7 + 4, vacc7x4567);
+      c7 = (float*) ((uintptr_t) c7 + cn_stride);
+      vst1q_f32(c6, vacc6x0123);
+      vst1q_f32(c6 + 4, vacc6x4567);
+      c6 = (float*) ((uintptr_t) c6 + cn_stride);
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c7, vacc7x0123); c7 += 4;
+        vst1q_f32(c6, vacc6x0123); c6 += 4;
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc7x0123 = vacc7x4567;
+        vacc6x0123 = vacc6x4567;
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
+      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c7, vacc7x01); c7 += 2;
+        vst1_f32(c6, vacc6x01); c6 += 2;
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc7x01 = vget_high_f32(vacc7x0123);
+        vacc6x01 = vget_high_f32(vacc6x0123);
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c7, vacc7x01, 0);
+        vst1_lane_f32(c6, vacc6x01, 0);
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/8x8s4-neonfma.c b/src/f32-igemm/8x8s4-neonfma.c
new file mode 100644
index 0000000..4787568
--- /dev/null
+++ b/src/f32-igemm/8x8s4-neonfma.c
@@ -0,0 +1,421 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/neon-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_8x8s4__neonfma(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (8 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    c5 = c4;
+  }
+  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    c6 = c5;
+  }
+  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    c7 = c6;
+  }
+
+  do {
+    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
+    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
+    float32x4_t vacc1x0123 = vacc0x0123;
+    float32x4_t vacc1x4567 = vacc0x4567;
+    float32x4_t vacc2x0123 = vacc0x0123;
+    float32x4_t vacc2x4567 = vacc0x4567;
+    float32x4_t vacc3x0123 = vacc0x0123;
+    float32x4_t vacc3x4567 = vacc0x4567;
+    float32x4_t vacc4x0123 = vacc0x0123;
+    float32x4_t vacc4x4567 = vacc0x4567;
+    float32x4_t vacc5x0123 = vacc0x0123;
+    float32x4_t vacc5x4567 = vacc0x4567;
+    float32x4_t vacc6x0123 = vacc0x0123;
+    float32x4_t vacc6x4567 = vacc0x4567;
+    float32x4_t vacc7x0123 = vacc0x0123;
+    float32x4_t vacc7x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      const float* restrict a6 = a[6];
+      assert(a6 != NULL);
+      if XNN_UNPREDICTABLE(a6 != zero) {
+        a6 = (const float*) ((uintptr_t) a6 + a_offset);
+      }
+      const float* restrict a7 = a[7];
+      assert(a7 != NULL);
+      if XNN_UNPREDICTABLE(a7 != zero) {
+        a7 = (const float*) ((uintptr_t) a7 + a_offset);
+      }
+      a += 8;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
+        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
+        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
+        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
+        float32x4_t va4 = vld1q_f32(a4); a4 += 4;
+        float32x4_t va5 = vld1q_f32(a5); a5 += 4;
+        float32x4_t va6 = vld1q_f32(a6); a6 += 4;
+        float32x4_t va7 = vld1q_f32(a7); a7 += 4;
+
+
+        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
+        const float32x4_t vb4567c0 = vld1q_f32(w + 4);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c0);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c0);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c0);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c0);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c0);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c0);
+        vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c0);
+        vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c0);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c0);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c0);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c0);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c0);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c0);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c0);
+        vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c0);
+        vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c0);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+        va6 = vextq_f32(va6, va6, 1);
+        va7 = vextq_f32(va7, va7, 1);
+
+        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
+        const float32x4_t vb4567c1 = vld1q_f32(w + 12);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c1);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c1);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c1);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c1);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c1);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c1);
+        vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c1);
+        vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c1);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c1);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c1);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c1);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c1);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c1);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c1);
+        vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c1);
+        vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c1);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+        va6 = vextq_f32(va6, va6, 1);
+        va7 = vextq_f32(va7, va7, 1);
+
+        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
+        const float32x4_t vb4567c2 = vld1q_f32(w + 20);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c2);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c2);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c2);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c2);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c2);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c2);
+        vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c2);
+        vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c2);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c2);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c2);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c2);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c2);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c2);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c2);
+        vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c2);
+        vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c2);
+
+        va0 = vextq_f32(va0, va0, 1);
+        va1 = vextq_f32(va1, va1, 1);
+        va2 = vextq_f32(va2, va2, 1);
+        va3 = vextq_f32(va3, va3, 1);
+        va4 = vextq_f32(va4, va4, 1);
+        va5 = vextq_f32(va5, va5, 1);
+        va6 = vextq_f32(va6, va6, 1);
+        va7 = vextq_f32(va7, va7, 1);
+
+        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
+        const float32x4_t vb4567c3 = vld1q_f32(w + 28);
+
+        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123c3);
+        vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123c3);
+        vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123c3);
+        vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123c3);
+        vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123c3);
+        vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123c3);
+        vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123c3);
+        vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c3);
+        vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567c3);
+        vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567c3);
+        vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567c3);
+        vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567c3);
+        vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567c3);
+        vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567c3);
+        vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567c3);
+        vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c3);
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
+          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
+          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
+          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
+          const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
+          const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
+          const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
+          const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;
+
+          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
+          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
+
+          vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
+          vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
+          vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
+          vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
+          vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
+          vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
+          vacc6x0123 = vfmaq_f32(vacc6x0123, va6, vb0123);
+          vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123);
+          vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
+          vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
+          vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
+          vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
+          vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
+          vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
+          vacc6x4567 = vfmaq_f32(vacc6x4567, va6, vb4567);
+          vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567);
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= 8 * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
+    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
+    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
+    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
+    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
+    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
+    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
+    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
+    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
+    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
+    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
+    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
+    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
+    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
+    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
+    vacc7x4567 = vminq_f32(vacc7x4567, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
+    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
+    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
+    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
+    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
+    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
+    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
+    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
+    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
+    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
+    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
+    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
+    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
+    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
+    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
+    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      vst1q_f32(c7, vacc7x0123);
+      vst1q_f32(c7 + 4, vacc7x4567);
+      c7 = (float*) ((uintptr_t) c7 + cn_stride);
+      vst1q_f32(c6, vacc6x0123);
+      vst1q_f32(c6 + 4, vacc6x4567);
+      c6 = (float*) ((uintptr_t) c6 + cn_stride);
+      vst1q_f32(c5, vacc5x0123);
+      vst1q_f32(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      vst1q_f32(c4, vacc4x0123);
+      vst1q_f32(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      vst1q_f32(c3, vacc3x0123);
+      vst1q_f32(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      vst1q_f32(c2, vacc2x0123);
+      vst1q_f32(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      vst1q_f32(c1, vacc1x0123);
+      vst1q_f32(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      vst1q_f32(c0, vacc0x0123);
+      vst1q_f32(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_f32(c7, vacc7x0123); c7 += 4;
+        vst1q_f32(c6, vacc6x0123); c6 += 4;
+        vst1q_f32(c5, vacc5x0123); c5 += 4;
+        vst1q_f32(c4, vacc4x0123); c4 += 4;
+        vst1q_f32(c3, vacc3x0123); c3 += 4;
+        vst1q_f32(c2, vacc2x0123); c2 += 4;
+        vst1q_f32(c1, vacc1x0123); c1 += 4;
+        vst1q_f32(c0, vacc0x0123); c0 += 4;
+
+        vacc7x0123 = vacc7x4567;
+        vacc6x0123 = vacc6x4567;
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+      }
+      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
+      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
+      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
+      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
+      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
+      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
+      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
+      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
+      if (nc & 2) {
+        vst1_f32(c7, vacc7x01); c7 += 2;
+        vst1_f32(c6, vacc6x01); c6 += 2;
+        vst1_f32(c5, vacc5x01); c5 += 2;
+        vst1_f32(c4, vacc4x01); c4 += 2;
+        vst1_f32(c3, vacc3x01); c3 += 2;
+        vst1_f32(c2, vacc2x01); c2 += 2;
+        vst1_f32(c1, vacc1x01); c1 += 2;
+        vst1_f32(c0, vacc0x01); c0 += 2;
+
+        vacc7x01 = vget_high_f32(vacc7x0123);
+        vacc6x01 = vget_high_f32(vacc6x0123);
+        vacc5x01 = vget_high_f32(vacc5x0123);
+        vacc4x01 = vget_high_f32(vacc4x0123);
+        vacc3x01 = vget_high_f32(vacc3x0123);
+        vacc2x01 = vget_high_f32(vacc2x0123);
+        vacc1x01 = vget_high_f32(vacc1x0123);
+        vacc0x01 = vget_high_f32(vacc0x0123);
+      }
+      if (nc & 1) {
+        vst1_lane_f32(c7, vacc7x01, 0);
+        vst1_lane_f32(c6, vacc6x01, 0);
+        vst1_lane_f32(c5, vacc5x01, 0);
+        vst1_lane_f32(c4, vacc4x01, 0);
+        vst1_lane_f32(c3, vacc3x01, 0);
+        vst1_lane_f32(c2, vacc2x01, 0);
+        vst1_lane_f32(c1, vacc1x01, 0);
+        vst1_lane_f32(c0, vacc0x01, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/neon-shuffle.c.in b/src/f32-igemm/neon-shuffle.c.in
new file mode 100644
index 0000000..57e22a7
--- /dev/null
+++ b/src/f32-igemm/neon-shuffle.c.in
@@ -0,0 +1,163 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert NR % 4 == 0
+$IDLETTERS = "0123456789ABCDEFGHIJKLMN"
+$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
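+// MR x NR shuffle ("s4") IGEMM micro-kernel template: weights are packed as four groups
+// of NR per 4-element block of k, so each 4-float LHS load is reused across four
+// multiply-accumulate rounds with a vextq_f32 rotation in between. FMA selects
+// vfmaq_f32 (NEONFMA) versus vmlaq_f32 (plain NEON).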
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_${MR}x${NR}s4__${"neonfma" if FMA else "neon"}(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (${MR} * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  $for M in range(1, MR):
+    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        c${M} = c${M-1};
+      }
+
+  do {
+    $for N in range(0, NR, 4):
+      float32x4_t vacc0x${IDLETTERS[N:N+4]} = vld1q_f32(w); w += 4;
+    $for M in range(1, MR):
+      $for N in range(0, NR, 4):
+        float32x4_t vacc${M}x${IDLETTERS[N:N+4]} = vacc0x${IDLETTERS[N:N+4]};
+
+    size_t p = ks;
+    do {
+      $for M in range(MR):
+        const float* restrict a${M} = a[${M}];
+        assert(a${M} != NULL);
+        if XNN_UNPREDICTABLE(a${M} != zero) {
+          a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
+        }
+      a += ${MR};
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        $for M in range(MR):
+          float32x4_t va${M} = vld1q_f32(a${M}); a${M} += 4;
+
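+        // Emit four multiply-accumulate rounds (L = 0..3); between rounds the LHS vectors
+        // are rotated one lane, and the rotate is skipped after the last round.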
+        $for L in range(4):
+
+          $for N in range(0, NR, 4):
+            const float32x4_t vb${IDLETTERS[N:N+4]}c${L} = vld1q_f32(w + ${L * NR + N});
+
+          $for N in range(0, NR, 4):
+            $for M in range(MR):
+              vacc${M}x${IDLETTERS[N:N+4]} = ${VMULADDQ_F32}(vacc${M}x${IDLETTERS[N:N+4]}, va${M}, vb${IDLETTERS[N:N+4]}c${L});
+
+          $if L + 1 != 4:
+            $for M in range(MR):
+              va${M} = vextq_f32(va${M}, va${M}, 1);
+
+        w += ${4 * NR};
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          $for M in range(MR):
+            const float32x4_t va${M} = vld1q_dup_f32(a${M}); a${M} += 1;
+
+          $for N in range(0, NR, 4):
+            const float32x4_t vb${IDLETTERS[N:N+4]} = vld1q_f32(w); w += 4;
+
+          $for N in range(0, NR, 4):
+            $for M in range(MR):
+              vacc${M}x${IDLETTERS[N:N+4]} = ${VMULADDQ_F32}(vacc${M}x${IDLETTERS[N:N+4]}, va${M}, vb${IDLETTERS[N:N+4]});
+
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+
+      p -= ${MR} * sizeof(void*);
+    } while (p != 0);
+
+    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+    $for N in range(0, NR, 4):
+      $for M in range(MR):
+        vacc${M}x${IDLETTERS[N:N+4]} = vminq_f32(vacc${M}x${IDLETTERS[N:N+4]}, vmax);
+
+    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+    $for N in range(0, NR, 4):
+      $for M in range(MR):
+        vacc${M}x${IDLETTERS[N:N+4]} = vmaxq_f32(vacc${M}x${IDLETTERS[N:N+4]}, vmin);
+
+    if XNN_LIKELY(nc >= ${NR}) {
+      $for M in reversed(range(MR)):
+        vst1q_f32(c${M}, vacc${M}x${IDLETTERS[0:4]});
+        $for N in range(4, NR, 4):
+          vst1q_f32(c${M} + ${N}, vacc${M}x${IDLETTERS[N:N+4]});
+        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= ${NR};
+    } else {
+      $for LOG2N in reversed(range(NR.bit_length())):
+        $if LOG2N == 1:
+          $for M in reversed(range(MR)):
+            float32x2_t vacc${M}x${IDLETTERS[0:2]} = vget_low_f32(vacc${M}x${IDLETTERS[0:4]});
+        $if 1 << LOG2N != NR:
+          if (nc & ${1 << LOG2N}) {
+            $if LOG2N >= 2:
+              $for N in range(0, 1 << LOG2N, 4):
+                $for M in reversed(range(MR)):
+                  vst1q_f32(c${M}, vacc${M}x${IDLETTERS[N:N+4]}); c${M} += 4;
+
+              $for M in reversed(range(MR)):
+                $for N in range(0, 1 << (LOG2N - 1), 4):
+                  vacc${M}x${IDLETTERS[N:N+4]} = vacc${M}x${IDLETTERS[N + (1 << LOG2N):N + (1 << LOG2N)+4]};
+            $elif LOG2N == 1:
+              $for M in reversed(range(MR)):
+                vst1_f32(c${M}, vacc${M}x${IDLETTERS[0:2]}); c${M} += 2;
+
+              $for M in reversed(range(MR)):
+                vacc${M}x${IDLETTERS[0:2]} = vget_high_f32(vacc${M}x${IDLETTERS[0:4]});
+            $elif LOG2N == 0:
+              $for M in reversed(range(MR)):
+                vst1_lane_f32(c${M}, vacc${M}x${IDLETTERS[0:2]}, 0);
+          }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/init.c b/src/init.c
index 083b978..eb8220b 100644
--- a/src/init.c
+++ b/src/init.c
@@ -324,8 +324,6 @@
           break;
         case cpuinfo_uarch_cortex_a75:
         case cpuinfo_uarch_cortex_a76:
-        case cpuinfo_uarch_mongoose_m1:
-        case cpuinfo_uarch_mongoose_m2:
         case cpuinfo_uarch_meerkat_m3:
         case (cpuinfo_uarch_meerkat_m3 + 1):
           xnn_params.f32.gemm = (struct gemm_parameters) {
@@ -337,6 +335,20 @@
             .nr = 8,
           };
           break;
+
+        case cpuinfo_uarch_mongoose_m1:
+        case cpuinfo_uarch_mongoose_m2:
+          xnn_params.f32.gemm = (struct gemm_parameters) {
+            .gemm = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_6x8s4__neonfma,
+            .igemm = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_6x8s4__neonfma,
+            .gemm1 = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_1x8s4__neonfma,
+            .igemm1 = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_1x8s4__neonfma,
+            .mr = 6,
+            .nr = 8,
+            .log2_sr = 2,
+          };
+          break;
+
         case cpuinfo_uarch_cortex_a53:
         case cpuinfo_uarch_cortex_a55:
           xnn_params.f32.gemm = (struct gemm_parameters) {
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index 604be8a..0df00f5 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -43,6 +43,8 @@
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__psimd_splat)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__sse_dup)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__sse_load1)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__neonfma)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__psimd)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__sse)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_2x4__scalar)
@@ -64,6 +66,8 @@
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__psimd_splat)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__sse_dup)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__sse_load1)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__neonfma)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__psimd)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__sse)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__aarch64_neonfma_cortex_a75)
@@ -79,7 +83,11 @@
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_ld64)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__psimd_loadsplat)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__psimd_splat)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__neonfma)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__psimd)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8s4__neonfma)
 
 #define DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(fn_name) \
   XNN_INTERNAL void fn_name(                          \
@@ -107,6 +115,8 @@
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__psimd_splat)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__sse_dup)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__sse_load1)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__neonfma)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__psimd)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__sse)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_2x4__scalar)
@@ -125,6 +135,8 @@
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__psimd_splat)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__sse_dup)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__sse_load1)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__neonfma)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__psimd)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__sse)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__aarch64_neonfma_cortex_a75)
@@ -140,7 +152,11 @@
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_ld64)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__psimd_splat)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__neonfma)
 DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__psimd)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8s4__neonfma)
 
 
 #define DECLARE_F16_GEMM_UKERNEL_FUNCTION(fn_name) \
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 5c7e9bc..398545b 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -45,6 +45,8 @@
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__psimd_splat)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__sse_dup)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__sse_load1)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__neon)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__neonfma)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__psimd)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__sse)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_2x4__scalar)
@@ -67,6 +69,8 @@
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__psimd_splat)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__sse_dup)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__sse_load1)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__neon)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__neonfma)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__psimd)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__sse)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__aarch64_neonfma_cortex_a75)
@@ -78,7 +82,11 @@
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neonfma_ld64)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__psimd_loadsplat)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__psimd_splat)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__neon)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__neonfma)
 DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__psimd)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x8s4__neon)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x8s4__neonfma)
 
 
 #define DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(fn_name) \