Auto-generate SSE versions of DWCONV2D CHW 3x3p1 micro-kernels
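
The new micro-kernels are produced from the src/f32-dwconv2d-chw/3x3p1-sse.c.in
template via tools/xngen, parameterized by ROW_TILE (output rows computed per
loop iteration) and ACCUMULATORS (independent partial-sum registers per output
row); the variants map to the ROW_TILEx4[-accN] file names. For example, the
2x4-acc2 variant is regenerated with the command below, as listed in the new
scripts/generate-f32-dwconv2d-chw.sh:

  tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c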

PiperOrigin-RevId: 338798065
diff --git a/BUILD.bazel b/BUILD.bazel
index 4a984f1..113f32e 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1808,7 +1808,16 @@
     "src/f32-dwconv/gen/up8x9-minmax-sse.c",
     "src/f32-dwconv/gen/up8x25-minmax-sse-acc2.c",
     "src/f32-dwconv/gen/up8x25-minmax-sse.c",
-    "src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-5x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-6x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c",
     "src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c",
     "src/f32-gavgpool-cw/sse-x4.c",
     "src/f32-gavgpool/7p7x-minmax-sse-c4.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 270f1e0..0fe3137 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1273,6 +1273,16 @@
   src/f32-dwconv/gen/up8x9-minmax-sse.c
   src/f32-dwconv/gen/up8x25-minmax-sse-acc2.c
   src/f32-dwconv/gen/up8x25-minmax-sse.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-3x4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-4x4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-5x4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-6x4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc2.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c
+  src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c
   src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
   src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
   src/f32-gavgpool-cw/sse-x4.c
diff --git a/bench/f32-dwconv2d-chw.cc b/bench/f32-dwconv2d-chw.cc
index 7b1233d..2681e21 100644
--- a/bench/f32-dwconv2d-chw.cc
+++ b/bench/f32-dwconv2d-chw.cc
@@ -168,15 +168,52 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  static void dwconv2d_chw_3x3p1__sse_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_6x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2, 3, 3, 1, 1);
+  }
   static void dwconv2d_chw_3x3p1__sse_1x4_acc3(benchmark::State& state, const char* net) {
     DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3, 3, 3, 1, 1);
   }
+  static void dwconv2d_chw_3x3p1__sse_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__sse_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2, 3, 3, 1, 1);
+  }
 
   static void dwconv2d_chw_3x3s2p1__sse_1x4_acc3(benchmark::State& state, const char* net) {
     DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3, 3, 3, 1, 2);
   }
 
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_6x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_1x4_acc2)
   BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_2x4_acc2)
+
   BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_1x4_acc3)
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
diff --git a/scripts/generate-f32-dwconv2d-chw.sh b/scripts/generate-f32-dwconv2d-chw.sh
new file mode 100755
index 0000000..d52c771
--- /dev/null
+++ b/scripts/generate-f32-dwconv2d-chw.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+################################### x86 SSE ###################################
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-4x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-5x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-6x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-sse.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c
+
+################################## Unit tests #################################
+tools/generate-dwconv2d-chw-test.py --spec test/f32-dwconv2d-chw.yaml --output test/f32-dwconv2d-chw.cc
diff --git a/scripts/generate-tests.sh b/scripts/generate-tests.sh
index a86ea34..edf8549 100755
--- a/scripts/generate-tests.sh
+++ b/scripts/generate-tests.sh
@@ -32,6 +32,3 @@
 
 ### Tests for ArgMaxPool micro-kernels
 tools/generate-argmaxpool-test.py --spec test/f32-argmaxpool.yaml --output test/f32-argmaxpool.cc
-
-### Tests for DWConv2D CHW micro-kernels
-tools/generate-dwconv2d-chw-test.py --spec test/f32-dwconv2d-chw.yaml --output test/f32-dwconv2d-chw.cc
diff --git a/src/f32-dwconv2d-chw/3x3p1-sse.c.in b/src/f32-dwconv2d-chw/3x3p1-sse.c.in
new file mode 100644
index 0000000..1efaacf
--- /dev/null
+++ b/src/f32-dwconv2d-chw/3x3p1-sse.c.in
@@ -0,0 +1,235 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  $for M in range(2, 2 + ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+  size_t output_height = input_height;
+  do {
+    $for M in range(2, 2 + ROW_TILE):
+      if XNN_UNPREDICTABLE(output_height < ${M}) {
+        i${M} = zero;
+        $if M <= ROW_TILE:
+          o${M-1} = o${M-2};
+      }
+
+    $for M in range(2 + ROW_TILE):
+      // vi${M}x3012 = ( vi${M}2, vi${M}1, vi${M}0, vi${M}3 )
+      __m128 vi${M}x3012 = _mm_setzero_ps();
+
+    $for M in range(2 + ROW_TILE):
+      __m128 vi${M}x4567 = _mm_loadu_ps(i${M});
+      i${M} += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x89AB = ( vi${M}B, vi${M}A, vi${M}9, vi${M}8 )
+        const __m128 vi${M}x89AB = _mm_loadu_ps(i${M});
+        i${M} += 4;
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x7456 = ( vi${M}6, vi${M}5, vi${M}4, vi${M}7 )
+        const __m128 vi${M}x7456 = _mm_shuffle_ps(vi${M}x4567, vi${M}x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K == 0:
+            __m128 vo${M}p0 = _mm_add_ps(vbias, _mm_mul_ps(vi${M+K}x4567, vk${K}1));
+          $elif K < ACCUMULATORS:
+            __m128 vo${M}p${K} = _mm_mul_ps(vi${M+K}x4567, vk${K}1);
+          $else:
+            vo${M}p${K % ACCUMULATORS} = _mm_add_ps(vo${M}p${K % ACCUMULATORS}, _mm_mul_ps(vi${M+K}x4567, vk${K}1));
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x3456 = ( vi${M}6, vi${M}5, vi${M}4, vi${M}3 )
+        const __m128 vi${M}x3456 = _mm_move_ss(vi${M}x7456, vi${M}x3012);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K+3 < ACCUMULATORS:
+            __m128 vo${M}p${K+3} = _mm_mul_ps(vi${M+K}x3456, vk${K}0);
+          $else:
+            vo${M}p${(K+3) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+3) % ACCUMULATORS}, _mm_mul_ps(vi${M+K}x3456, vk${K}0));
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x3012 = vi${M}x7456;
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x8567 = ( vi${M}7, vi${M}6, vi${M}5, vi${M}8 )
+        const __m128 vi${M}x8567 = _mm_move_ss(vi${M}x4567, vi${M}x89AB);
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x5678 = ( vi${M}8, vi${M}7, vi${M}6, vi${M}5 )
+        const __m128 vi${M}x5678 = _mm_shuffle_ps(vi${M}x8567, vi${M}x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          vo${M}p${(K+6) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+6) % ACCUMULATORS}, _mm_mul_ps(vi${M+K}x5678, vk${K}2));
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x4567 = vi${M}x89AB;
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = _mm_add_ps(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $for M in range(ROW_TILE):
+        __m128 vo${M} = _mm_max_ps(vo${M}p0, vmin);
+
+      $for M in range(ROW_TILE):
+        vo${M} = _mm_min_ps(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        _mm_storeu_ps(o${M}, vo${M});
+        o${M} += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      $for M in range(2 + ROW_TILE):
+        vi${M}x4567 = _mm_and_ps(vmask, vi${M}x4567);
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x7456 = ( vi${M}6, vi${M}5, vi${M}4, vi${M}7 )
+        const __m128 vi${M}x7456 = _mm_shuffle_ps(vi${M}x4567, vi${M}x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K == 0:
+            __m128 vo${M}p0 = _mm_add_ps(vbias, _mm_mul_ps(vi${M+K}x4567, vk${K}1));
+          $elif K < ACCUMULATORS:
+            __m128 vo${M}p${K} = _mm_mul_ps(vi${M+K}x4567, vk${K}1);
+          $else:
+            vo${M}p${K % ACCUMULATORS} = _mm_add_ps(vo${M}p${K % ACCUMULATORS}, _mm_mul_ps(vi${M+K}x4567, vk${K}1));
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x3456 = ( vi${M}6, vi${M}5, vi${M}4, vi${M}3 )
+        const __m128 vi${M}x3456 = _mm_move_ss(vi${M}x7456, vi${M}x3012);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K+3 < ACCUMULATORS:
+            __m128 vo${M}p${K+3} = _mm_mul_ps(vi${M+K}x3456, vk${K}0);
+          $else:
+            vo${M}p${(K+3) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+3) % ACCUMULATORS}, _mm_mul_ps(vi${M+K}x3456, vk${K}0));
+
+      const __m128 vzero = _mm_setzero_ps();
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x8567 = ( vi${M}7, vi${M}6, vi${M}5, 0.0 )
+        const __m128 vi${M}x8567 = _mm_move_ss(vi${M}x4567, vzero);
+
+      $for M in range(2 + ROW_TILE):
+        // vi${M}x5678 = ( vi${M}8, vi${M}7, vi${M}6, vi${M}5 )
+        const __m128 vi${M}x5678 = _mm_shuffle_ps(vi${M}x8567, vi${M}x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          vo${M}p${(K+6) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+6) % ACCUMULATORS}, _mm_mul_ps(vi${M+K}x5678, vk${K}2));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = _mm_add_ps(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $for M in range(ROW_TILE):
+        __m128 vo${M} = _mm_max_ps(vo${M}p0, vmin);
+
+      $for M in range(ROW_TILE):
+        vo${M} = _mm_min_ps(vo${M}, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        $for M in reversed(range(ROW_TILE)):
+          _mm_storeu_ps(o${M}, vo${M});
+          o${M} += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            _mm_storel_pi((__m64*) o${M}, vo${M});
+            o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = _mm_movehl_ps(vo${M}, vo${M});
+        }
+        if (w & (1 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            _mm_store_ss(o${M}, vo${M});
+            o${M} += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement);
+    $for M in range(2, 2 + ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+  } while (${"--" if ROW_TILE == 1 else ""}output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc2.c
similarity index 71%
copy from src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
copy to src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc2.c
index 4001834..0d3f10e 100644
--- a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc2.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
 //
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
@@ -11,7 +15,7 @@
 #include <xnnpack/math.h>
 
 
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3(
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2(
     size_t input_height,
     size_t input_width,
     const float* input,
@@ -47,39 +51,37 @@
   const float* i1 = input;
   const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
 
+  float* o0 = output;
+
   size_t output_height = input_height;
   do {
-    if XNN_UNPREDICTABLE(output_height == 1) {
+    if XNN_UNPREDICTABLE(output_height < 2) {
       i2 = zero;
     }
 
-    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
+    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
     __m128 vi0x3012 = _mm_setzero_ps();
-    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
+    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
     __m128 vi1x3012 = _mm_setzero_ps();
-    // vi2x3012 = ( vi22, vi21, vi20, vi13 )
+    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
     __m128 vi2x3012 = _mm_setzero_ps();
-    // vi0x4567 = ( vi07, vi06, vi05, vi04 )
+
     __m128 vi0x4567 = _mm_loadu_ps(i0);
     i0 += 4;
-    // vi1x4567 = ( vi17, vi16, vi15, vi14 )
     __m128 vi1x4567 = _mm_loadu_ps(i1);
     i1 += 4;
-    // vi2x4567 = ( vi27, vi26, vi25, vi24 )
     __m128 vi2x4567 = _mm_loadu_ps(i2);
     i2 += 4;
 
     size_t w = input_width;
     for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      __m128 vo4567p0 = vbias;
-
       // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
       const __m128 vi0x89AB = _mm_loadu_ps(i0);
       i0 += 4;
-      // vi1x89AB = ( vi1B, vi0A, vi09, vi08 )
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
       const __m128 vi1x89AB = _mm_loadu_ps(i1);
       i1 += 4;
-      // vi2x89AB = ( vi2B, vi0A, vi09, vi08 )
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
       const __m128 vi2x89AB = _mm_loadu_ps(i2);
       i2 += 4;
 
@@ -90,9 +92,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -101,9 +103,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
 
       vi0x3012 = vi0x7456;
       vi1x3012 = vi1x7456;
@@ -123,29 +125,27 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
 
       vi0x4567 = vi0x89AB;
       vi1x4567 = vi1x89AB;
       vi2x4567 = vi2x89AB;
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
 
-      _mm_storeu_ps(output, vo);
-      output += 4;
+      vo0 = _mm_min_ps(vo0, vmax);
+
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
     }
     // Always process the last block of 1..4 pixels.
     assert(w >= 1 * sizeof(float));
     assert(w <= 4 * sizeof(float));
     {
-      __m128 vo4567p0 = vbias;
-
       vi0x4567 = _mm_and_ps(vmask, vi0x4567);
       vi1x4567 = _mm_and_ps(vmask, vi1x4567);
       vi2x4567 = _mm_and_ps(vmask, vi2x4567);
@@ -157,9 +157,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -168,9 +168,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
 
       const __m128 vzero = _mm_setzero_ps();
       // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
@@ -187,28 +187,29 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
 
       if XNN_LIKELY(w == 4 * sizeof(float)) {
-        _mm_storeu_ps(output, vo);
-        output += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
       } else {
         if (w & (2 * sizeof(float))) {
-          _mm_storel_pi((__m64*) output, vo);
-          output += 2;
-          vo = _mm_movehl_ps(vo, vo);
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
         }
         if (w & (1 * sizeof(float))) {
-          _mm_store_ss(output, vo);
-          output += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
         }
       }
     }
@@ -216,5 +217,7 @@
     i0 = (const float*) ((uintptr_t) i1 - input_decrement);
     i1 = (const float*) ((uintptr_t) i2 - input_decrement);
     i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
   } while (--output_height != 0);
 }
diff --git a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c
similarity index 72%
rename from src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
rename to src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c
index 4001834..0362b11 100644
--- a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
 //
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
@@ -47,39 +51,37 @@
   const float* i1 = input;
   const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
 
+  float* o0 = output;
+
   size_t output_height = input_height;
   do {
-    if XNN_UNPREDICTABLE(output_height == 1) {
+    if XNN_UNPREDICTABLE(output_height < 2) {
       i2 = zero;
     }
 
-    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
+    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
     __m128 vi0x3012 = _mm_setzero_ps();
-    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
+    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
     __m128 vi1x3012 = _mm_setzero_ps();
-    // vi2x3012 = ( vi22, vi21, vi20, vi13 )
+    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
     __m128 vi2x3012 = _mm_setzero_ps();
-    // vi0x4567 = ( vi07, vi06, vi05, vi04 )
+
     __m128 vi0x4567 = _mm_loadu_ps(i0);
     i0 += 4;
-    // vi1x4567 = ( vi17, vi16, vi15, vi14 )
     __m128 vi1x4567 = _mm_loadu_ps(i1);
     i1 += 4;
-    // vi2x4567 = ( vi27, vi26, vi25, vi24 )
     __m128 vi2x4567 = _mm_loadu_ps(i2);
     i2 += 4;
 
     size_t w = input_width;
     for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      __m128 vo4567p0 = vbias;
-
       // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
       const __m128 vi0x89AB = _mm_loadu_ps(i0);
       i0 += 4;
-      // vi1x89AB = ( vi1B, vi0A, vi09, vi08 )
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
       const __m128 vi1x89AB = _mm_loadu_ps(i1);
       i1 += 4;
-      // vi2x89AB = ( vi2B, vi0A, vi09, vi08 )
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
       const __m128 vi2x89AB = _mm_loadu_ps(i2);
       i2 += 4;
 
@@ -90,9 +92,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      __m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -101,9 +103,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk10));
+      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk20));
 
       vi0x3012 = vi0x7456;
       vi1x3012 = vi1x7456;
@@ -123,29 +125,28 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
+      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk22));
 
       vi0x4567 = vi0x89AB;
       vi1x4567 = vi1x89AB;
       vi2x4567 = vi2x89AB;
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p2);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
 
-      _mm_storeu_ps(output, vo);
-      output += 4;
+      vo0 = _mm_min_ps(vo0, vmax);
+
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
     }
     // Always process the last block of 1..4 pixels.
     assert(w >= 1 * sizeof(float));
     assert(w <= 4 * sizeof(float));
     {
-      __m128 vo4567p0 = vbias;
-
       vi0x4567 = _mm_and_ps(vmask, vi0x4567);
       vi1x4567 = _mm_and_ps(vmask, vi1x4567);
       vi2x4567 = _mm_and_ps(vmask, vi2x4567);
@@ -157,9 +158,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      __m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -168,9 +169,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk10));
+      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk20));
 
       const __m128 vzero = _mm_setzero_ps();
       // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
@@ -187,28 +188,30 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
+      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk22));
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p2);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
 
       if XNN_LIKELY(w == 4 * sizeof(float)) {
-        _mm_storeu_ps(output, vo);
-        output += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
       } else {
         if (w & (2 * sizeof(float))) {
-          _mm_storel_pi((__m64*) output, vo);
-          output += 2;
-          vo = _mm_movehl_ps(vo, vo);
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
         }
         if (w & (1 * sizeof(float))) {
-          _mm_store_ss(output, vo);
-          output += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
         }
       }
     }
@@ -216,5 +219,7 @@
     i0 = (const float*) ((uintptr_t) i1 - input_decrement);
     i1 = (const float*) ((uintptr_t) i2 - input_decrement);
     i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
   } while (--output_height != 0);
 }
diff --git a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c
similarity index 71%
copy from src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
copy to src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c
index 4001834..4aa9ce3 100644
--- a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
 //
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
@@ -11,7 +15,7 @@
 #include <xnnpack/math.h>
 
 
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3(
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4(
     size_t input_height,
     size_t input_width,
     const float* input,
@@ -47,39 +51,37 @@
   const float* i1 = input;
   const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
 
+  float* o0 = output;
+
   size_t output_height = input_height;
   do {
-    if XNN_UNPREDICTABLE(output_height == 1) {
+    if XNN_UNPREDICTABLE(output_height < 2) {
       i2 = zero;
     }
 
-    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
+    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
     __m128 vi0x3012 = _mm_setzero_ps();
-    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
+    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
     __m128 vi1x3012 = _mm_setzero_ps();
-    // vi2x3012 = ( vi22, vi21, vi20, vi13 )
+    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
     __m128 vi2x3012 = _mm_setzero_ps();
-    // vi0x4567 = ( vi07, vi06, vi05, vi04 )
+
     __m128 vi0x4567 = _mm_loadu_ps(i0);
     i0 += 4;
-    // vi1x4567 = ( vi17, vi16, vi15, vi14 )
     __m128 vi1x4567 = _mm_loadu_ps(i1);
     i1 += 4;
-    // vi2x4567 = ( vi27, vi26, vi25, vi24 )
     __m128 vi2x4567 = _mm_loadu_ps(i2);
     i2 += 4;
 
     size_t w = input_width;
     for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      __m128 vo4567p0 = vbias;
-
       // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
       const __m128 vi0x89AB = _mm_loadu_ps(i0);
       i0 += 4;
-      // vi1x89AB = ( vi1B, vi0A, vi09, vi08 )
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
       const __m128 vi1x89AB = _mm_loadu_ps(i1);
       i1 += 4;
-      // vi2x89AB = ( vi2B, vi0A, vi09, vi08 )
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
       const __m128 vi2x89AB = _mm_loadu_ps(i2);
       i2 += 4;
 
@@ -90,9 +92,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      __m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -101,9 +103,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      __m128 vo0p3 = _mm_mul_ps(vi0x3456, vk00);
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
 
       vi0x3012 = vi0x7456;
       vi1x3012 = vi1x7456;
@@ -123,29 +125,29 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x5678, vk02));
+      vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
 
       vi0x4567 = vi0x89AB;
       vi1x4567 = vi1x89AB;
       vi2x4567 = vi2x89AB;
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+      vo0p2 = _mm_add_ps(vo0p2, vo0p3);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p2);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
 
-      _mm_storeu_ps(output, vo);
-      output += 4;
+      vo0 = _mm_min_ps(vo0, vmax);
+
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
     }
     // Always process the last block of 1..4 pixels.
     assert(w >= 1 * sizeof(float));
     assert(w <= 4 * sizeof(float));
     {
-      __m128 vo4567p0 = vbias;
-
       vi0x4567 = _mm_and_ps(vmask, vi0x4567);
       vi1x4567 = _mm_and_ps(vmask, vi1x4567);
       vi2x4567 = _mm_and_ps(vmask, vi2x4567);
@@ -157,9 +159,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      __m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -168,9 +170,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      __m128 vo0p3 = _mm_mul_ps(vi0x3456, vk00);
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
 
       const __m128 vzero = _mm_setzero_ps();
       // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
@@ -187,28 +189,31 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x5678, vk02));
+      vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+      vo0p2 = _mm_add_ps(vo0p2, vo0p3);
+      vo0p0 = _mm_add_ps(vo0p0, vo0p2);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
 
       if XNN_LIKELY(w == 4 * sizeof(float)) {
-        _mm_storeu_ps(output, vo);
-        output += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
       } else {
         if (w & (2 * sizeof(float))) {
-          _mm_storel_pi((__m64*) output, vo);
-          output += 2;
-          vo = _mm_movehl_ps(vo, vo);
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
         }
         if (w & (1 * sizeof(float))) {
-          _mm_store_ss(output, vo);
-          output += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
         }
       }
     }
@@ -216,5 +221,7 @@
     i0 = (const float*) ((uintptr_t) i1 - input_decrement);
     i1 = (const float*) ((uintptr_t) i2 - input_decrement);
     i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
   } while (--output_height != 0);
 }
diff --git a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4.c
similarity index 71%
copy from src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
copy to src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4.c
index 4001834..c7106cb 100644
--- a/src/f32-dwconv2d-chw/3x3p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
 //
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
@@ -11,7 +15,7 @@
 #include <xnnpack/math.h>
 
 
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3(
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4(
     size_t input_height,
     size_t input_width,
     const float* input,
@@ -47,39 +51,37 @@
   const float* i1 = input;
   const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
 
+  float* o0 = output;
+
   size_t output_height = input_height;
   do {
-    if XNN_UNPREDICTABLE(output_height == 1) {
+    if XNN_UNPREDICTABLE(output_height < 2) {
       i2 = zero;
     }
 
-    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
+    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
     __m128 vi0x3012 = _mm_setzero_ps();
-    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
+    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
     __m128 vi1x3012 = _mm_setzero_ps();
-    // vi2x3012 = ( vi22, vi21, vi20, vi13 )
+    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
     __m128 vi2x3012 = _mm_setzero_ps();
-    // vi0x4567 = ( vi07, vi06, vi05, vi04 )
+
     __m128 vi0x4567 = _mm_loadu_ps(i0);
     i0 += 4;
-    // vi1x4567 = ( vi17, vi16, vi15, vi14 )
     __m128 vi1x4567 = _mm_loadu_ps(i1);
     i1 += 4;
-    // vi2x4567 = ( vi27, vi26, vi25, vi24 )
     __m128 vi2x4567 = _mm_loadu_ps(i2);
     i2 += 4;
 
     size_t w = input_width;
     for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      __m128 vo4567p0 = vbias;
-
       // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
       const __m128 vi0x89AB = _mm_loadu_ps(i0);
       i0 += 4;
-      // vi1x89AB = ( vi1B, vi0A, vi09, vi08 )
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
       const __m128 vi1x89AB = _mm_loadu_ps(i1);
       i1 += 4;
-      // vi2x89AB = ( vi2B, vi0A, vi09, vi08 )
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
       const __m128 vi2x89AB = _mm_loadu_ps(i2);
       i2 += 4;
 
@@ -90,9 +92,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -101,9 +103,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
 
       vi0x3012 = vi0x7456;
       vi1x3012 = vi1x7456;
@@ -123,29 +125,26 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
 
       vi0x4567 = vi0x89AB;
       vi1x4567 = vi1x89AB;
       vi2x4567 = vi2x89AB;
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
 
-      _mm_storeu_ps(output, vo);
-      output += 4;
+      vo0 = _mm_min_ps(vo0, vmax);
+
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
     }
     // Always process the last block of 1..4 pixels.
     assert(w >= 1 * sizeof(float));
     assert(w <= 4 * sizeof(float));
     {
-      __m128 vo4567p0 = vbias;
-
       vi0x4567 = _mm_and_ps(vmask, vi0x4567);
       vi1x4567 = _mm_and_ps(vmask, vi1x4567);
       vi2x4567 = _mm_and_ps(vmask, vi2x4567);
@@ -157,9 +156,9 @@
       // vi2x7456 = ( vi26, vi25, vi24, vi27 )
       const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
-      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
-      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
 
       // vi0x3456 = ( vi06, vi05, vi04, vi03 )
       const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
@@ -168,9 +167,9 @@
       // vi2x3456 = ( vi26, vi25, vi24, vi23 )
       const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
 
       const __m128 vzero = _mm_setzero_ps();
       // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
@@ -187,28 +186,28 @@
       // vi2x5678 = ( vi28, vi27, vi26, vi25 )
       const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
 
-      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
-      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
-      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
 
-      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
-      vo = _mm_add_ps(vo, vo4567p2);
 
-      vo = _mm_max_ps(vo, vmin);
-      vo = _mm_min_ps(vo, vmax);
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
 
       if XNN_LIKELY(w == 4 * sizeof(float)) {
-        _mm_storeu_ps(output, vo);
-        output += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
       } else {
         if (w & (2 * sizeof(float))) {
-          _mm_storel_pi((__m64*) output, vo);
-          output += 2;
-          vo = _mm_movehl_ps(vo, vo);
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
         }
         if (w & (1 * sizeof(float))) {
-          _mm_store_ss(output, vo);
-          output += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
         }
       }
     }
@@ -216,5 +215,7 @@
     i0 = (const float*) ((uintptr_t) i1 - input_decrement);
     i1 = (const float*) ((uintptr_t) i2 - input_decrement);
     i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
   } while (--output_height != 0);
 }
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c
new file mode 100644
index 0000000..171fa28
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c
@@ -0,0 +1,292 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
+    __m128 vi0x3012 = _mm_setzero_ps();
+    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
+    __m128 vi1x3012 = _mm_setzero_ps();
+    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
+    __m128 vi2x3012 = _mm_setzero_ps();
+    // vi3x3012 = ( vi32, vi31, vi30, vi33 )
+    __m128 vi3x3012 = _mm_setzero_ps();
+
+    __m128 vi0x4567 = _mm_loadu_ps(i0);
+    i0 += 4;
+    __m128 vi1x4567 = _mm_loadu_ps(i1);
+    i1 += 4;
+    __m128 vi2x4567 = _mm_loadu_ps(i2);
+    i2 += 4;
+    __m128 vi3x4567 = _mm_loadu_ps(i3);
+    i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
+      const __m128 vi0x89AB = _mm_loadu_ps(i0);
+      i0 += 4;
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
+      const __m128 vi1x89AB = _mm_loadu_ps(i1);
+      i1 += 4;
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
+      const __m128 vi2x89AB = _mm_loadu_ps(i2);
+      i2 += 4;
+      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
+      const __m128 vi3x89AB = _mm_loadu_ps(i3);
+      i3 += 4;
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      __m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
+      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
+      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));
+
+      vi0x3012 = vi0x7456;
+      vi1x3012 = vi1x7456;
+      vi2x3012 = vi2x7456;
+      vi3x3012 = vi3x7456;
+
+      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
+      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
+      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
+      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
+      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+      vo1p0 = _mm_add_ps(vo1p0, vo1p1);
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+
+      _mm_storeu_ps(o1, vo1);
+      o1 += 4;
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
+      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
+      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
+      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
+      __m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
+      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
+      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));
+
+      const __m128 vzero = _mm_setzero_ps();
+      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
+      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
+      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
+      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
+      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+
+      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+      vo1p0 = _mm_add_ps(vo1p0, vo1p1);
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        _mm_storeu_ps(o1, vo1);
+        o1 += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          _mm_storel_pi((__m64*) o1, vo1);
+          o1 += 2;
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
+          vo1 = _mm_movehl_ps(vo1, vo1);
+        }
+        if (w & (1 * sizeof(float))) {
+          _mm_store_ss(o1, vo1);
+          o1 += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4.c
new file mode 100644
index 0000000..13afcc5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4.c
@@ -0,0 +1,288 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    // vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
+    __m128 vi0x3012 = _mm_setzero_ps();
+    // vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
+    __m128 vi1x3012 = _mm_setzero_ps();
+    // vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
+    __m128 vi2x3012 = _mm_setzero_ps();
+    // vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
+    __m128 vi3x3012 = _mm_setzero_ps();
+
+    __m128 vi0x4567 = _mm_loadu_ps(i0);
+    i0 += 4;
+    __m128 vi1x4567 = _mm_loadu_ps(i1);
+    i1 += 4;
+    __m128 vi2x4567 = _mm_loadu_ps(i2);
+    i2 += 4;
+    __m128 vi3x4567 = _mm_loadu_ps(i3);
+    i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
+      const __m128 vi0x89AB = _mm_loadu_ps(i0);
+      i0 += 4;
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
+      const __m128 vi1x89AB = _mm_loadu_ps(i1);
+      i1 += 4;
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
+      const __m128 vi2x89AB = _mm_loadu_ps(i2);
+      i2 += 4;
+      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
+      const __m128 vi3x89AB = _mm_loadu_ps(i3);
+      i3 += 4;
+
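+      // Shift the 4-pixel window left by one: rotate x4567 so element 7 sits
+      // in lane 0 (x7456), then overwrite that lane with element 3 carried
+      // over from the previous block in x3012, yielding x3456.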
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+
+      vi0x3012 = vi0x7456;
+      vi1x3012 = vi1x7456;
+      vi2x3012 = vi2x7456;
+      vi3x3012 = vi3x7456;
+
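+      // Shift the window right by one: splice element 8 from the next block
+      // into lane 0 (x8567), then rotate the lanes down to obtain x5678 for
+      // the right-hand column taps.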
+      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
+      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
+      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
+      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+
+      _mm_storeu_ps(o1, vo1);
+      o1 += 4;
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
+      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
+      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
+      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+
+      const __m128 vzero = _mm_setzero_ps();
+      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
+      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
+      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
+      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        _mm_storeu_ps(o1, vo1);
+        o1 += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          _mm_storel_pi((__m64*) o1, vo1);
+          o1 += 2;
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
+          vo1 = _mm_movehl_ps(vo1, vo1);
+        }
+        if (w & (1 * sizeof(float))) {
+          _mm_store_ss(o1, vo1);
+          o1 += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
+        }
+      }
+    }
+
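+    // Advance to the next pair of output rows: the new top context row is the
+    // old i2. input_decrement undoes the 4-float-block over-advance of the row
+    // pointers so they land back at the start of their rows.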
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-3x4.c
new file mode 100644
index 0000000..1e0fef5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-3x4.c
@@ -0,0 +1,353 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+    }
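+    // With fewer than 3 rows left, the surplus input pointers read the zero
+    // row and the surplus output pointers alias the last valid row; their
+    // stores are overwritten by the correct row, which is stored last.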
+
+    // vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
+    __m128 vi0x3012 = _mm_setzero_ps();
+    // vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
+    __m128 vi1x3012 = _mm_setzero_ps();
+    // vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
+    __m128 vi2x3012 = _mm_setzero_ps();
+    // vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
+    __m128 vi3x3012 = _mm_setzero_ps();
+    // vi4x3012 = ( vi42, vi41, vi{M}0, vi{M}3 )
+    __m128 vi4x3012 = _mm_setzero_ps();
+
+    __m128 vi0x4567 = _mm_loadu_ps(i0);
+    i0 += 4;
+    __m128 vi1x4567 = _mm_loadu_ps(i1);
+    i1 += 4;
+    __m128 vi2x4567 = _mm_loadu_ps(i2);
+    i2 += 4;
+    __m128 vi3x4567 = _mm_loadu_ps(i3);
+    i3 += 4;
+    __m128 vi4x4567 = _mm_loadu_ps(i4);
+    i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
+      const __m128 vi0x89AB = _mm_loadu_ps(i0);
+      i0 += 4;
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
+      const __m128 vi1x89AB = _mm_loadu_ps(i1);
+      i1 += 4;
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
+      const __m128 vi2x89AB = _mm_loadu_ps(i2);
+      i2 += 4;
+      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
+      const __m128 vi3x89AB = _mm_loadu_ps(i3);
+      i3 += 4;
+      // vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
+      const __m128 vi4x89AB = _mm_loadu_ps(i4);
+      i4 += 4;
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+
+      vi0x3012 = vi0x7456;
+      vi1x3012 = vi1x7456;
+      vi2x3012 = vi2x7456;
+      vi3x3012 = vi3x7456;
+      vi4x3012 = vi4x7456;
+
+      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
+      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
+      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
+      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
+      // vi4x8567 = ( vi47, vi46, vi45, vi48 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+
+      _mm_storeu_ps(o2, vo2);
+      o2 += 4;
+      _mm_storeu_ps(o1, vo1);
+      o1 += 4;
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
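+      // The mask zeroes any lanes past the last valid pixel so values read
+      // beyond the row end cannot contribute to the products below.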
+      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
+      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
+      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
+      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
+      vi4x4567 = _mm_and_ps(vmask, vi4x4567);
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+
+      const __m128 vzero = _mm_setzero_ps();
+      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
+      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
+      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
+      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
+      // vi4x8567 = ( vi47, vi46, vi45, 0.0 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        _mm_storeu_ps(o2, vo2);
+        o2 += 4;
+        _mm_storeu_ps(o1, vo1);
+        o1 += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          _mm_storel_pi((__m64*) o2, vo2);
+          o2 += 2;
+          _mm_storel_pi((__m64*) o1, vo1);
+          o1 += 2;
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
+          vo1 = _mm_movehl_ps(vo1, vo1);
+          vo2 = _mm_movehl_ps(vo2, vo2);
+        }
+        if (w & (1 * sizeof(float))) {
+          _mm_store_ss(o2, vo2);
+          o2 += 1;
+          _mm_store_ss(o1, vo1);
+          o1 += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-4x4.c
new file mode 100644
index 0000000..3227e0e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-4x4.c
@@ -0,0 +1,418 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+    }
+
+    // vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
+    __m128 vi0x3012 = _mm_setzero_ps();
+    // vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
+    __m128 vi1x3012 = _mm_setzero_ps();
+    // vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
+    __m128 vi2x3012 = _mm_setzero_ps();
+    // vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
+    __m128 vi3x3012 = _mm_setzero_ps();
+    // vi4x3012 = ( vi42, vi41, vi{M}0, vi{M}3 )
+    __m128 vi4x3012 = _mm_setzero_ps();
+    // vi5x3012 = ( vi52, vi51, vi{M}0, vi{M}3 )
+    __m128 vi5x3012 = _mm_setzero_ps();
+
+    __m128 vi0x4567 = _mm_loadu_ps(i0);
+    i0 += 4;
+    __m128 vi1x4567 = _mm_loadu_ps(i1);
+    i1 += 4;
+    __m128 vi2x4567 = _mm_loadu_ps(i2);
+    i2 += 4;
+    __m128 vi3x4567 = _mm_loadu_ps(i3);
+    i3 += 4;
+    __m128 vi4x4567 = _mm_loadu_ps(i4);
+    i4 += 4;
+    __m128 vi5x4567 = _mm_loadu_ps(i5);
+    i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
+      const __m128 vi0x89AB = _mm_loadu_ps(i0);
+      i0 += 4;
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
+      const __m128 vi1x89AB = _mm_loadu_ps(i1);
+      i1 += 4;
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
+      const __m128 vi2x89AB = _mm_loadu_ps(i2);
+      i2 += 4;
+      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
+      const __m128 vi3x89AB = _mm_loadu_ps(i3);
+      i3 += 4;
+      // vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
+      const __m128 vi4x89AB = _mm_loadu_ps(i4);
+      i4 += 4;
+      // vi5x89AB = ( vi5B, vi5A, vi59, vi58 )
+      const __m128 vi5x89AB = _mm_loadu_ps(i5);
+      i5 += 4;
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi5x7456 = ( vi56, vi55, vi54, vi57 )
+      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+      // vi5x3456 = ( vi56, vi55, vi54, vi53 )
+      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
+
+      vi0x3012 = vi0x7456;
+      vi1x3012 = vi1x7456;
+      vi2x3012 = vi2x7456;
+      vi3x3012 = vi3x7456;
+      vi4x3012 = vi4x7456;
+      vi5x3012 = vi5x7456;
+
+      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
+      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
+      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
+      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
+      // vi4x8567 = ( vi47, vi46, vi45, vi48 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
+      // vi5x8567 = ( vi57, vi56, vi55, vi58 )
+      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi5x5678 = ( vi58, vi57, vi56, vi55 )
+      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+      vo3 = _mm_min_ps(vo3, vmax);
+
+      _mm_storeu_ps(o3, vo3);
+      o3 += 4;
+      _mm_storeu_ps(o2, vo2);
+      o2 += 4;
+      _mm_storeu_ps(o1, vo1);
+      o1 += 4;
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
+      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
+      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
+      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
+      vi4x4567 = _mm_and_ps(vmask, vi4x4567);
+      vi5x4567 = _mm_and_ps(vmask, vi5x4567);
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi5x7456 = ( vi56, vi55, vi54, vi57 )
+      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+      // vi5x3456 = ( vi56, vi55, vi54, vi53 )
+      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
+
+      const __m128 vzero = _mm_setzero_ps();
+      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
+      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
+      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
+      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
+      // vi4x8567 = ( vi47, vi46, vi45, 0.0 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
+      // vi5x8567 = ( vi57, vi56, vi55, 0.0 )
+      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi5x5678 = ( vi58, vi57, vi56, vi55 )
+      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+      vo3 = _mm_min_ps(vo3, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        _mm_storeu_ps(o3, vo3);
+        o3 += 4;
+        _mm_storeu_ps(o2, vo2);
+        o2 += 4;
+        _mm_storeu_ps(o1, vo1);
+        o1 += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
+      } else {
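+        // Store the remaining 1..3 outputs piecewise: the low pair first,
+        // then move the high pair into the low lanes and store one more
+        // element if the remaining count is odd.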
+        if (w & (2 * sizeof(float))) {
+          _mm_storel_pi((__m64*) o3, vo3);
+          o3 += 2;
+          _mm_storel_pi((__m64*) o2, vo2);
+          o2 += 2;
+          _mm_storel_pi((__m64*) o1, vo1);
+          o1 += 2;
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
+          vo1 = _mm_movehl_ps(vo1, vo1);
+          vo2 = _mm_movehl_ps(vo2, vo2);
+          vo3 = _mm_movehl_ps(vo3, vo3);
+        }
+        if (w & (1 * sizeof(float))) {
+          _mm_store_ss(o3, vo3);
+          o3 += 1;
+          _mm_store_ss(o2, vo2);
+          o2 += 1;
+          _mm_store_ss(o1, vo1);
+          o1 += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-5x4.c
new file mode 100644
index 0000000..20f30cb
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-5x4.c
@@ -0,0 +1,483 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
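+  // Five output rows consume seven input rows (i0..i6); each output row reads
+  // three consecutive input rows and shares two of them with its neighbor.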
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+    }
+
+    // vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
+    __m128 vi0x3012 = _mm_setzero_ps();
+    // vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
+    __m128 vi1x3012 = _mm_setzero_ps();
+    // vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
+    __m128 vi2x3012 = _mm_setzero_ps();
+    // vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
+    __m128 vi3x3012 = _mm_setzero_ps();
+    // vi4x3012 = ( vi42, vi41, vi{M}0, vi{M}3 )
+    __m128 vi4x3012 = _mm_setzero_ps();
+    // vi5x3012 = ( vi52, vi51, vi{M}0, vi{M}3 )
+    __m128 vi5x3012 = _mm_setzero_ps();
+    // vi6x3012 = ( vi62, vi61, vi{M}0, vi{M}3 )
+    __m128 vi6x3012 = _mm_setzero_ps();
+
+    __m128 vi0x4567 = _mm_loadu_ps(i0);
+    i0 += 4;
+    __m128 vi1x4567 = _mm_loadu_ps(i1);
+    i1 += 4;
+    __m128 vi2x4567 = _mm_loadu_ps(i2);
+    i2 += 4;
+    __m128 vi3x4567 = _mm_loadu_ps(i3);
+    i3 += 4;
+    __m128 vi4x4567 = _mm_loadu_ps(i4);
+    i4 += 4;
+    __m128 vi5x4567 = _mm_loadu_ps(i5);
+    i5 += 4;
+    __m128 vi6x4567 = _mm_loadu_ps(i6);
+    i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
+      const __m128 vi0x89AB = _mm_loadu_ps(i0);
+      i0 += 4;
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
+      const __m128 vi1x89AB = _mm_loadu_ps(i1);
+      i1 += 4;
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
+      const __m128 vi2x89AB = _mm_loadu_ps(i2);
+      i2 += 4;
+      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
+      const __m128 vi3x89AB = _mm_loadu_ps(i3);
+      i3 += 4;
+      // vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
+      const __m128 vi4x89AB = _mm_loadu_ps(i4);
+      i4 += 4;
+      // vi5x89AB = ( vi5B, vi5A, vi59, vi58 )
+      const __m128 vi5x89AB = _mm_loadu_ps(i5);
+      i5 += 4;
+      // vi6x89AB = ( vi6B, vi6A, vi69, vi68 )
+      const __m128 vi6x89AB = _mm_loadu_ps(i6);
+      i6 += 4;
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi5x7456 = ( vi56, vi55, vi54, vi57 )
+      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi6x7456 = ( vi66, vi65, vi64, vi67 )
+      const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
+      __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+      // vi5x3456 = ( vi56, vi55, vi54, vi53 )
+      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
+      // vi6x3456 = ( vi66, vi65, vi64, vi63 )
+      const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
+
+      vi0x3012 = vi0x7456;
+      vi1x3012 = vi1x7456;
+      vi2x3012 = vi2x7456;
+      vi3x3012 = vi3x7456;
+      vi4x3012 = vi4x7456;
+      vi5x3012 = vi5x7456;
+      vi6x3012 = vi6x7456;
+
+      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
+      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
+      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
+      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
+      // vi4x8567 = ( vi47, vi46, vi45, vi48 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
+      // vi5x8567 = ( vi57, vi56, vi55, vi58 )
+      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
+      // vi6x8567 = ( vi67, vi66, vi65, vi68 )
+      const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vi6x89AB);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi5x5678 = ( vi58, vi57, vi56, vi55 )
+      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi6x5678 = ( vi68, vi67, vi66, vi65 )
+      const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
+
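+      // The block loaded this iteration (x89AB) becomes the current block (x4567) of the next iteration.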
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+      __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+      vo3 = _mm_min_ps(vo3, vmax);
+      vo4 = _mm_min_ps(vo4, vmax);
+
+      _mm_storeu_ps(o4, vo4);
+      o4 += 4;
+      _mm_storeu_ps(o3, vo3);
+      o3 += 4;
+      _mm_storeu_ps(o2, vo2);
+      o2 += 4;
+      _mm_storeu_ps(o1, vo1);
+      o1 += 4;
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
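+      // Zero the lanes that lie beyond the end of the row so they contribute nothing, acting as right padding.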
+      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
+      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
+      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
+      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
+      vi4x4567 = _mm_and_ps(vmask, vi4x4567);
+      vi5x4567 = _mm_and_ps(vmask, vi5x4567);
+      vi6x4567 = _mm_and_ps(vmask, vi6x4567);
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi5x7456 = ( vi56, vi55, vi54, vi57 )
+      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi6x7456 = ( vi66, vi65, vi64, vi67 )
+      const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
+      __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+      // vi5x3456 = ( vi56, vi55, vi54, vi53 )
+      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
+      // vi6x3456 = ( vi66, vi65, vi64, vi63 )
+      const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
+
+      const __m128 vzero = _mm_setzero_ps();
+      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
+      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
+      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
+      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
+      // vi4x8567 = ( vi47, vi46, vi45, 0.0 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
+      // vi5x8567 = ( vi57, vi56, vi55, 0.0 )
+      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);
+      // vi6x8567 = ( vi67, vi66, vi65, 0.0 )
+      const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vzero);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi5x5678 = ( vi58, vi57, vi56, vi55 )
+      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi6x5678 = ( vi68, vi67, vi66, vi65 )
+      const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+      __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+      vo3 = _mm_min_ps(vo3, vmax);
+      vo4 = _mm_min_ps(vo4, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        _mm_storeu_ps(o4, vo4);
+        o4 += 4;
+        _mm_storeu_ps(o3, vo3);
+        o3 += 4;
+        _mm_storeu_ps(o2, vo2);
+        o2 += 4;
+        _mm_storeu_ps(o1, vo1);
+        o1 += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          _mm_storel_pi((__m64*) o4, vo4);
+          o4 += 2;
+          _mm_storel_pi((__m64*) o3, vo3);
+          o3 += 2;
+          _mm_storel_pi((__m64*) o2, vo2);
+          o2 += 2;
+          _mm_storel_pi((__m64*) o1, vo1);
+          o1 += 2;
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
+          vo1 = _mm_movehl_ps(vo1, vo1);
+          vo2 = _mm_movehl_ps(vo2, vo2);
+          vo3 = _mm_movehl_ps(vo3, vo3);
+          vo4 = _mm_movehl_ps(vo4, vo4);
+        }
+        if (w & (1 * sizeof(float))) {
+          _mm_store_ss(o4, vo4);
+          o4 += 1;
+          _mm_store_ss(o3, vo3);
+          o3 += 1;
+          _mm_store_ss(o2, vo2);
+          o2 += 1;
+          _mm_store_ss(o1, vo1);
+          o1 += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
+        }
+      }
+    }
+
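+    // Rebase the input pointers for the next group of 5 output rows: i5 and i6 of this group are
+    // rewound to the start of their rows and reused as i0 and i1.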
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-6x4.c
new file mode 100644
index 0000000..b9d5b83
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-6x4.c
@@ -0,0 +1,548 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
+  const __m128 vmax = _mm_load_ps(params->sse.max);
+  const __m128 vmin = _mm_load_ps(params->sse.min);
+
+  const __m128 vbias = _mm_load1_ps(weights);
+  const __m128 vk00 = _mm_load1_ps(weights + 1);
+  const __m128 vk01 = _mm_load1_ps(weights + 2);
+  const __m128 vk02 = _mm_load1_ps(weights + 3);
+  const __m128 vk10 = _mm_load1_ps(weights + 4);
+  const __m128 vk11 = _mm_load1_ps(weights + 5);
+  const __m128 vk12 = _mm_load1_ps(weights + 6);
+  const __m128 vk20 = _mm_load1_ps(weights + 7);
+  const __m128 vk21 = _mm_load1_ps(weights + 8);
+  const __m128 vk22 = _mm_load1_ps(weights + 9);
+
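+  // Each row is consumed in 4-float blocks, so the input pointers advance by input_width rounded up
+  // to a multiple of 16 bytes per row; input_decrement undoes that advance when the pointers are
+  // rebased for the next group of output rows.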
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+  float* o5 = (float*) ((uintptr_t) o4 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+      o5 = o4;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i7 = zero;
+    }
+
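+    // The x3012 registers carry the last column of the previous 4-pixel block (lane 0, consumed via
+    // _mm_move_ss below); starting them at zero implements the implicit left padding of 1.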
+    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
+    __m128 vi0x3012 = _mm_setzero_ps();
+    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
+    __m128 vi1x3012 = _mm_setzero_ps();
+    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
+    __m128 vi2x3012 = _mm_setzero_ps();
+    // vi3x3012 = ( vi32, vi31, vi30, vi33 )
+    __m128 vi3x3012 = _mm_setzero_ps();
+    // vi4x3012 = ( vi42, vi41, vi40, vi43 )
+    __m128 vi4x3012 = _mm_setzero_ps();
+    // vi5x3012 = ( vi52, vi51, vi50, vi53 )
+    __m128 vi5x3012 = _mm_setzero_ps();
+    // vi6x3012 = ( vi62, vi61, vi60, vi63 )
+    __m128 vi6x3012 = _mm_setzero_ps();
+    // vi7x3012 = ( vi72, vi71, vi70, vi73 )
+    __m128 vi7x3012 = _mm_setzero_ps();
+
+    __m128 vi0x4567 = _mm_loadu_ps(i0);
+    i0 += 4;
+    __m128 vi1x4567 = _mm_loadu_ps(i1);
+    i1 += 4;
+    __m128 vi2x4567 = _mm_loadu_ps(i2);
+    i2 += 4;
+    __m128 vi3x4567 = _mm_loadu_ps(i3);
+    i3 += 4;
+    __m128 vi4x4567 = _mm_loadu_ps(i4);
+    i4 += 4;
+    __m128 vi5x4567 = _mm_loadu_ps(i5);
+    i5 += 4;
+    __m128 vi6x4567 = _mm_loadu_ps(i6);
+    i6 += 4;
+    __m128 vi7x4567 = _mm_loadu_ps(i7);
+    i7 += 4;
+
+    size_t w = input_width;
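+    // Process the row in blocks of 4 output pixels. Each block needs input columns x-1..x+4,
+    // assembled from the current block (x4567), the carried previous column (x3012), and the
+    // next block (x89AB).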
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
+      const __m128 vi0x89AB = _mm_loadu_ps(i0);
+      i0 += 4;
+      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
+      const __m128 vi1x89AB = _mm_loadu_ps(i1);
+      i1 += 4;
+      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
+      const __m128 vi2x89AB = _mm_loadu_ps(i2);
+      i2 += 4;
+      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
+      const __m128 vi3x89AB = _mm_loadu_ps(i3);
+      i3 += 4;
+      // vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
+      const __m128 vi4x89AB = _mm_loadu_ps(i4);
+      i4 += 4;
+      // vi5x89AB = ( vi5B, vi5A, vi59, vi58 )
+      const __m128 vi5x89AB = _mm_loadu_ps(i5);
+      i5 += 4;
+      // vi6x89AB = ( vi6B, vi6A, vi69, vi68 )
+      const __m128 vi6x89AB = _mm_loadu_ps(i6);
+      i6 += 4;
+      // vi7x89AB = ( vi7B, vi7A, vi79, vi78 )
+      const __m128 vi7x89AB = _mm_loadu_ps(i7);
+      i7 += 4;
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi5x7456 = ( vi56, vi55, vi54, vi57 )
+      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi6x7456 = ( vi66, vi65, vi64, vi67 )
+      const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi7x7456 = ( vi76, vi75, vi74, vi77 )
+      const __m128 vi7x7456 = _mm_shuffle_ps(vi7x4567, vi7x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
+      __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
+      __m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+      // vi5x3456 = ( vi56, vi55, vi54, vi53 )
+      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
+      // vi6x3456 = ( vi66, vi65, vi64, vi63 )
+      const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
+      // vi7x3456 = ( vi76, vi75, vi74, vi73 )
+      const __m128 vi7x3456 = _mm_move_ss(vi7x7456, vi7x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));
+
+      vi0x3012 = vi0x7456;
+      vi1x3012 = vi1x7456;
+      vi2x3012 = vi2x7456;
+      vi3x3012 = vi3x7456;
+      vi4x3012 = vi4x7456;
+      vi5x3012 = vi5x7456;
+      vi6x3012 = vi6x7456;
+      vi7x3012 = vi7x7456;
+
+      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
+      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
+      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
+      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
+      // vi4x8567 = ( vi47, vi46, vi45, vi48 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
+      // vi5x8567 = ( vi57, vi56, vi55, vi58 )
+      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
+      // vi6x8567 = ( vi67, vi66, vi65, vi68 )
+      const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vi6x89AB);
+      // vi7x8567 = ( vi77, vi76, vi75, vi78 )
+      const __m128 vi7x8567 = _mm_move_ss(vi7x4567, vi7x89AB);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi5x5678 = ( vi58, vi57, vi56, vi55 )
+      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi6x5678 = ( vi68, vi67, vi66, vi65 )
+      const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi7x5678 = ( vi78, vi77, vi76, vi75 )
+      const __m128 vi7x5678 = _mm_shuffle_ps(vi7x8567, vi7x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));
+
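+      // The block loaded this iteration (x89AB) becomes the current block (x4567) of the next iteration.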
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+      vi7x4567 = vi7x89AB;
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+      __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+      __m128 vo5 = _mm_max_ps(vo5p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+      vo3 = _mm_min_ps(vo3, vmax);
+      vo4 = _mm_min_ps(vo4, vmax);
+      vo5 = _mm_min_ps(vo5, vmax);
+
+      _mm_storeu_ps(o5, vo5);
+      o5 += 4;
+      _mm_storeu_ps(o4, vo4);
+      o4 += 4;
+      _mm_storeu_ps(o3, vo3);
+      o3 += 4;
+      _mm_storeu_ps(o2, vo2);
+      o2 += 4;
+      _mm_storeu_ps(o1, vo1);
+      o1 += 4;
+      _mm_storeu_ps(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
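+      // Zero the lanes that lie beyond the end of the row so they contribute nothing, acting as right padding.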
+      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
+      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
+      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
+      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
+      vi4x4567 = _mm_and_ps(vmask, vi4x4567);
+      vi5x4567 = _mm_and_ps(vmask, vi5x4567);
+      vi6x4567 = _mm_and_ps(vmask, vi6x4567);
+      vi7x4567 = _mm_and_ps(vmask, vi7x4567);
+
+      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
+      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
+      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
+      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
+      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi4x7456 = ( vi46, vi45, vi44, vi47 )
+      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi5x7456 = ( vi56, vi55, vi54, vi57 )
+      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi6x7456 = ( vi66, vi65, vi64, vi67 )
+      const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
+      // vi7x7456 = ( vi76, vi75, vi74, vi77 )
+      const __m128 vi7x7456 = _mm_shuffle_ps(vi7x4567, vi7x4567, _MM_SHUFFLE(2, 1, 0, 3));
+
+      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
+      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
+      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
+      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
+      __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
+      __m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));
+
+      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
+      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
+      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
+      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
+      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
+      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
+      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
+      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
+      // vi4x3456 = ( vi46, vi45, vi44, vi43 )
+      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
+      // vi5x3456 = ( vi56, vi55, vi54, vi53 )
+      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
+      // vi6x3456 = ( vi66, vi65, vi64, vi63 )
+      const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
+      // vi7x3456 = ( vi76, vi75, vi74, vi73 )
+      const __m128 vi7x3456 = _mm_move_ss(vi7x7456, vi7x3012);
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));
+
+      const __m128 vzero = _mm_setzero_ps();
+      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
+      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
+      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
+      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
+      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
+      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
+      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
+      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
+      // vi4x8567 = ( vi47, vi46, vi45, 0.0 )
+      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
+      // vi5x8567 = ( vi57, vi56, vi55, 0.0 )
+      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);
+      // vi6x8567 = ( vi67, vi66, vi65, 0.0 )
+      const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vzero);
+      // vi7x8567 = ( vi77, vi76, vi75, 0.0 )
+      const __m128 vi7x8567 = _mm_move_ss(vi7x4567, vzero);
+
+      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
+      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
+      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
+      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
+      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi4x5678 = ( vi48, vi47, vi46, vi45 )
+      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi5x5678 = ( vi58, vi57, vi56, vi55 )
+      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi6x5678 = ( vi68, vi67, vi66, vi65 )
+      const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
+      // vi7x5678 = ( vi78, vi77, vi76, vi75 )
+      const __m128 vi7x5678 = _mm_shuffle_ps(vi7x8567, vi7x8567, _MM_SHUFFLE(0, 3, 2, 1));
+
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
+      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
+      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
+      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
+      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
+      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
+      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));
+
+
+      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+      __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+      __m128 vo5 = _mm_max_ps(vo5p0, vmin);
+
+      vo0 = _mm_min_ps(vo0, vmax);
+      vo1 = _mm_min_ps(vo1, vmax);
+      vo2 = _mm_min_ps(vo2, vmax);
+      vo3 = _mm_min_ps(vo3, vmax);
+      vo4 = _mm_min_ps(vo4, vmax);
+      vo5 = _mm_min_ps(vo5, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        _mm_storeu_ps(o5, vo5);
+        o5 += 4;
+        _mm_storeu_ps(o4, vo4);
+        o4 += 4;
+        _mm_storeu_ps(o3, vo3);
+        o3 += 4;
+        _mm_storeu_ps(o2, vo2);
+        o2 += 4;
+        _mm_storeu_ps(o1, vo1);
+        o1 += 4;
+        _mm_storeu_ps(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          _mm_storel_pi((__m64*) o5, vo5);
+          o5 += 2;
+          _mm_storel_pi((__m64*) o4, vo4);
+          o4 += 2;
+          _mm_storel_pi((__m64*) o3, vo3);
+          o3 += 2;
+          _mm_storel_pi((__m64*) o2, vo2);
+          o2 += 2;
+          _mm_storel_pi((__m64*) o1, vo1);
+          o1 += 2;
+          _mm_storel_pi((__m64*) o0, vo0);
+          o0 += 2;
+
+          vo0 = _mm_movehl_ps(vo0, vo0);
+          vo1 = _mm_movehl_ps(vo1, vo1);
+          vo2 = _mm_movehl_ps(vo2, vo2);
+          vo3 = _mm_movehl_ps(vo3, vo3);
+          vo4 = _mm_movehl_ps(vo4, vo4);
+          vo5 = _mm_movehl_ps(vo5, vo5);
+        }
+        if (w & (1 * sizeof(float))) {
+          _mm_store_ss(o5, vo5);
+          o5 += 1;
+          _mm_store_ss(o4, vo4);
+          o4 += 1;
+          _mm_store_ss(o3, vo3);
+          o3 += 1;
+          _mm_store_ss(o2, vo2);
+          o2 += 1;
+          _mm_store_ss(o1, vo1);
+          o1 += 1;
+          _mm_store_ss(o0, vo0);
+          o0 += 1;
+        }
+      }
+    }
+
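+    // Rebase the input pointers for the next group of 6 output rows: i6 and i7 of this group are
+    // rewound to the start of their rows and reused as i0 and i1.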
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o5;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+    o5 = (float*) ((uintptr_t) o4 + input_width);
+
+    output_height = doz(output_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/init.c b/src/init.c
index c3c2a5b..a882545 100644
--- a/src/init.c
+++ b/src/init.c
@@ -1901,10 +1901,10 @@
         .output_width_tile = 2,
       };
       xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
-        .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3,
+        .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2,
         .input_width_tile = 4,
         .output_width_tile = 4,
-        .output_height_tile = 1,
+        .output_height_tile = 2,
       };
       xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3,
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index 3e09081..f151f1f 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -346,7 +346,16 @@
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc3)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_3x4)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neonfma_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3)
 
 
diff --git a/test/f32-dwconv2d-chw.cc b/test/f32-dwconv2d-chw.cc
index 4f9eb5b..d338795 100644
--- a/test/f32-dwconv2d-chw.cc
+++ b/test/f32-dwconv2d-chw.cc
@@ -526,6 +526,812 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4, output_height_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_height_div_2) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_height_lt_2) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4, output_height_gt_2) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_height_div_3) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_height_lt_3) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_3X4, output_height_gt_3) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_height_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_height_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_4X4, output_height_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_height_div_5) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_height_lt_5) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_5X4, output_height_gt_5) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(6)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_height_div_6) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_height_lt_6) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 1; input_height < 6; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_6X4, output_height_gt_6) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC2, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC2, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC2, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC2, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC2, output_height_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC3, output_width_eq_4) {
     TEST_REQUIRES_X86_SSE;
     DWConv2DMicrokernelTester()
@@ -614,6 +1420,220 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC4, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC4, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC4, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC4, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_1X4_ACC4, output_height_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_width_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_width_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_width_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_width_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_height_div_2) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_height_lt_2) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__SSE_2X4_ACC2, output_height_gt_2) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_eq_4) {
     TEST_REQUIRES_X86_SSE;
     for (size_t input_width = 7; input_width < 9; input_width++) {
diff --git a/test/f32-dwconv2d-chw.yaml b/test/f32-dwconv2d-chw.yaml
index fd34278..470bf7a 100644
--- a/test/f32-dwconv2d-chw.yaml
+++ b/test/f32-dwconv2d-chw.yaml
@@ -14,7 +14,16 @@
 - name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neonfma_1x4_acc2
   arch:
     - aarch64
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__psimd_1x4_acc3
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__psimd_1x4_acc3