Auto-generate SSE versions of DWCONV2D CHW 3x3s2p1 micro-kernels
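
The hand-written 3x3s2p1 SSE micro-kernel (1x4-acc3) is replaced by an xngen
template, src/f32-dwconv2d-chw/3x3s2p1-sse.c.in, parameterized by ROW_TILE and
ACCUMULATORS. Eight variants are generated (1x4, 2x4, 3x4, 4x4, 1x4-acc2,
1x4-acc3, 1x4-acc4, 2x4-acc2), and BUILD.bazel, CMakeLists.txt, and the
f32-dwconv2d-chw benchmark are updated to cover them.

For background (illustration only, not part of the change): the stride-2
kernels de-interleave eight consecutive input pixels into even/odd vectors
with _mm_shuffle_ps, then recover the left neighbors by rotating the odd
vector and splicing in the last odd pixel of the previous block with
_mm_move_ss. A minimal single-row sketch of that trick, with hypothetical
names:

    #include <xmmintrin.h>

    // Four stride-2 outputs of one 1x3 filter row; vk0/vk1/vk2 hold the
    // left/center/right taps broadcast to all lanes. Lane 0 of *vcarry holds
    // the last odd pixel of the previous block on entry, and of this block
    // on exit.
    static __m128 conv1x3s2_block(const float* in, __m128 vk0, __m128 vk1,
                                  __m128 vk2, __m128* vcarry)
    {
      const __m128 vi89AB = _mm_loadu_ps(in);
      const __m128 viCDEF = _mm_loadu_ps(in + 4);
      // Even pixels {8,A,C,E} (the output centers) and odd pixels {9,B,D,F}.
      const __m128 vi8ACE = _mm_shuffle_ps(vi89AB, viCDEF, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vi9BDF = _mm_shuffle_ps(vi89AB, viCDEF, _MM_SHUFFLE(3, 1, 3, 1));
      // Rotate the odd pixels to {F,9,B,D}, then replace lane 0 (pixel F)
      // with the carried pixel 7 to form the left neighbors {7,9,B,D}.
      const __m128 viF9BD = _mm_shuffle_ps(vi9BDF, vi9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi7BDF = _mm_move_ss(viF9BD, *vcarry);
      *vcarry = viF9BD;  // lane 0 now holds pixel F for the next block

      __m128 vo = _mm_mul_ps(vi8ACE, vk1);             // center taps
      vo = _mm_add_ps(vo, _mm_mul_ps(vi9BDF, vk2));    // right taps
      return _mm_add_ps(vo, _mm_mul_ps(vi7BDF, vk0));  // left taps
    }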
PiperOrigin-RevId: 338962367
diff --git a/BUILD.bazel b/BUILD.bazel
index 1fe18fc..f16eb95 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1846,7 +1846,14 @@
"src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c",
"src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c",
"src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c",
- "src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-3x4.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-4x4.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc2.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc3.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc4.c",
+ "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4-acc2.c",
"src/f32-gavgpool-cw/sse-x4.c",
"src/f32-gavgpool/7p7x-minmax-sse-c4.c",
"src/f32-gavgpool/7x-minmax-sse-c4.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 41f612f..a7a6a20 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1311,7 +1311,14 @@
src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc3.c
src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-1x4-acc4.c
src/f32-dwconv2d-chw/gen/3x3p1-minmax-sse-2x4-acc2.c
- src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-3x4.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-4x4.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc2.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc3.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc4.c
+ src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4-acc2.c
src/f32-gavgpool-cw/sse-x4.c
src/f32-gavgpool/7p7x-minmax-sse-c4.c
src/f32-gavgpool/7x-minmax-sse-c4.c
diff --git a/bench/f32-dwconv2d-chw.cc b/bench/f32-dwconv2d-chw.cc
index a3c75ac..6a30b25 100644
--- a/bench/f32-dwconv2d-chw.cc
+++ b/bench/f32-dwconv2d-chw.cc
@@ -316,9 +316,30 @@
DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2, 3, 3, 1, 1, benchmark::utils::CheckSSSE3);
}
+ static void dwconv2d_chw_3x3s2p1__sse_1x4(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4, 3, 3, 1, 2);
+ }
+ static void dwconv2d_chw_3x3s2p1__sse_2x4(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4, 3, 3, 1, 2);
+ }
+ static void dwconv2d_chw_3x3s2p1__sse_3x4(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4, 3, 3, 1, 2);
+ }
+ static void dwconv2d_chw_3x3s2p1__sse_4x4(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4, 3, 3, 1, 2);
+ }
+ static void dwconv2d_chw_3x3s2p1__sse_1x4_acc2(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2, 3, 3, 1, 2);
+ }
static void dwconv2d_chw_3x3s2p1__sse_1x4_acc3(benchmark::State& state, const char* net) {
DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3, 3, 3, 1, 2);
}
+ static void dwconv2d_chw_3x3s2p1__sse_1x4_acc4(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4, 3, 3, 1, 2);
+ }
+ static void dwconv2d_chw_3x3s2p1__sse_2x4_acc2(benchmark::State& state, const char* net) {
+ DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2, 3, 3, 1, 2);
+ }
BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_1x4)
BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__sse_2x4)
@@ -342,7 +363,14 @@
BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__ssse3_1x4_acc4)
BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__ssse3_2x4_acc2)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_1x4)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_2x4)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_3x4)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_4x4)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_1x4_acc2)
BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_1x4_acc3)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_1x4_acc4)
+ BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__sse_2x4_acc2)
#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
#if !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
diff --git a/scripts/generate-f32-dwconv2d-chw.sh b/scripts/generate-f32-dwconv2d-chw.sh
index a6623ec..47f5887 100755
--- a/scripts/generate-f32-dwconv2d-chw.sh
+++ b/scripts/generate-f32-dwconv2d-chw.sh
@@ -54,6 +54,16 @@
tools/xngen src/f32-dwconv2d-chw/3x3p1-ssse3.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-ssse3-1x4-acc4.c
tools/xngen src/f32-dwconv2d-chw/3x3p1-ssse3.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-ssse3-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-4x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-sse.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4-acc2.c
+
################################### Scalar ####################################
tools/xngen src/f32-dwconv2d-chw/3x3p1-scalar.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-scalar-1x1.c
tools/xngen src/f32-dwconv2d-chw/3x3p1-scalar.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-scalar-2x1.c
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-sse.c.in b/src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
new file mode 100644
index 0000000..78ec93f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
@@ -0,0 +1,229 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ $if ROW_TILE > 1:
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ $for M in range(2, 1 + 2 * ROW_TILE):
+ const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+ float* o0 = output;
+ $for M in range(1, ROW_TILE):
+ float* o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ $for M in range(2, 1 + 2 * ROW_TILE):
+ if XNN_UNPREDICTABLE(padded_input_height < ${2 + M}) {
+ i${M} = zero;
+ $if M % 2 == 1:
+          o${(M - 1) // 2} = o${(M - 1) // 2 - 1};
+ }
+
+ $for M in range(1 + 2 * ROW_TILE):
+ __m128 vi${M}x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}x89AB = _mm_loadu_ps(i${M});
+ const __m128 vi${M}xCDEF = _mm_loadu_ps(i${M} + 4);
+ i${M} += 8;
+
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}x8ACE = _mm_shuffle_ps(vi${M}x89AB, vi${M}xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi${M}x9BDF = _mm_shuffle_ps(vi${M}x89AB, vi${M}xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ $for K in range(3):
+ $for M in range(ROW_TILE):
+ $if K == 0:
+ __m128 vo${M}p0 = _mm_add_ps(vbias, _mm_mul_ps(vi${2*M+K}x8ACE, vk${K}1));
+ $elif K < ACCUMULATORS:
+ __m128 vo${M}p${K} = _mm_mul_ps(vi${2*M+K}x8ACE, vk${K}1);
+ $else:
+ vo${M}p${K % ACCUMULATORS} = _mm_add_ps(vo${M}p${K % ACCUMULATORS}, _mm_mul_ps(vi${2*M+K}x8ACE, vk${K}1));
+
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}xF9BD = _mm_shuffle_ps(vi${M}x9BDF, vi${M}x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ $for K in range(3):
+ $for M in range(ROW_TILE):
+ $if K+3 < ACCUMULATORS:
+ __m128 vo${M}p${K+3} = _mm_mul_ps(vi${2*M+K}x9BDF, vk${K}2);
+ $else:
+ vo${M}p${(K+3) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+3) % ACCUMULATORS}, _mm_mul_ps(vi${2*M+K}x9BDF, vk${K}2));
+
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}x7BDF = _mm_move_ss(vi${M}xF9BD, vi${M}x7531);
+
+ $for M in range(1 + 2 * ROW_TILE):
+ vi${M}x7531 = vi${M}xF9BD;
+
+ $for K in range(3):
+ $for M in range(ROW_TILE):
+ vo${M}p${(K+6) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+6) % ACCUMULATORS}, _mm_mul_ps(vi${2*M+K}x7BDF, vk${K}0));
+
+ $if ACCUMULATORS > 1:
+ $ACC_SLICE = 1
+ $while ACC_SLICE < ACCUMULATORS:
+ $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+ $if A + ACC_SLICE < ACCUMULATORS:
+ $for M in range(ROW_TILE):
+ vo${M}p${A} = _mm_add_ps(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+ $ACC_SLICE *= 2
+
+ $for M in range(ROW_TILE):
+ __m128 vo${M} = _mm_max_ps(vo${M}p0, vmin);
+
+ $for M in range(ROW_TILE):
+ vo${M} = _mm_min_ps(vo${M}, vmax);
+
+ $for M in reversed(range(ROW_TILE)):
+ _mm_storeu_ps(o${M}, vo${M});
+ o${M} += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}x89AB = _mm_loadu_ps(i${M});
+ const __m128 vi${M}xCDEF = _mm_loadu_ps(i${M} + 4);
+
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi${M}x89AB, vi${M}xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi${M}x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi${M}x89AB, vi${M}xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ $for K in range(3):
+ $for M in range(ROW_TILE):
+ $if K == 0:
+ __m128 vo${M}p0 = _mm_add_ps(vbias, _mm_mul_ps(vi${2*M+K}x8ACE, vk${K}1));
+ $elif K < ACCUMULATORS:
+ __m128 vo${M}p${K} = _mm_mul_ps(vi${2*M+K}x8ACE, vk${K}1);
+ $else:
+ vo${M}p${K % ACCUMULATORS} = _mm_add_ps(vo${M}p${K % ACCUMULATORS}, _mm_mul_ps(vi${2*M+K}x8ACE, vk${K}1));
+
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}xF9BD = _mm_shuffle_ps(vi${M}x9BDF, vi${M}x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ $for K in range(3):
+ $for M in range(ROW_TILE):
+ $if K+3 < ACCUMULATORS:
+ __m128 vo${M}p${K+3} = _mm_mul_ps(vi${2*M+K}x9BDF, vk${K}2);
+ $else:
+ vo${M}p${(K+3) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+3) % ACCUMULATORS}, _mm_mul_ps(vi${2*M+K}x9BDF, vk${K}2));
+
+ $for M in range(1 + 2 * ROW_TILE):
+ const __m128 vi${M}x7BDF = _mm_move_ss(vi${M}xF9BD, vi${M}x7531);
+
+ $for M in range(1 + 2 * ROW_TILE):
+ vi${M}x7531 = vi${M}xF9BD;
+
+ $for K in range(3):
+ $for M in range(ROW_TILE):
+ vo${M}p${(K+6) % ACCUMULATORS} = _mm_add_ps(vo${M}p${(K+6) % ACCUMULATORS}, _mm_mul_ps(vi${2*M+K}x7BDF, vk${K}0));
+
+ $if ACCUMULATORS > 1:
+ $ACC_SLICE = 1
+ $while ACC_SLICE < ACCUMULATORS:
+ $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+ $if A + ACC_SLICE < ACCUMULATORS:
+ $for M in range(ROW_TILE):
+ vo${M}p${A} = _mm_add_ps(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+ $ACC_SLICE *= 2
+
+ $for M in range(ROW_TILE):
+ __m128 vo${M} = _mm_max_ps(vo${M}p0, vmin);
+
+ $for M in range(ROW_TILE):
+ vo${M} = _mm_min_ps(vo${M}, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ $for M in reversed(range(ROW_TILE)):
+ _mm_storeu_ps(o${M}, vo${M});
+ o${M} += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ $for M in reversed(range(ROW_TILE)):
+ _mm_storel_pi((__m64*) o${M}, vo${M});
+ o${M} += 2;
+
+ $for M in range(ROW_TILE):
+ vo${M} = _mm_movehl_ps(vo${M}, vo${M});
+ }
+ if (w & (2 * sizeof(float))) {
+ $for M in reversed(range(ROW_TILE)):
+ _mm_store_ss(o${M}, vo${M});
+ o${M} += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i${2 * ROW_TILE} - input_decrement);
+ $for M in range(1, 1 + 2 * ROW_TILE):
+ i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+ $if ROW_TILE > 1:
+ o0 = o${ROW_TILE - 1};
+ $for M in range(1, ROW_TILE):
+ o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+ $if ROW_TILE > 1:
+ output_height = doz(output_height, ${ROW_TILE});
+ padded_input_height = doz(padded_input_height, ${ROW_TILE * 2});
+ $else:
+ output_height -= 1;
+ padded_input_height -= 2;
+ } while (output_height != 0);
+}
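Note on the $ACC_SLICE loop in the template above: it reduces the ACCUMULATORS
partial sums pairwise (a tree reduction), keeping the dependent add chains
short. With ACCUMULATORS=4 it expands to exactly the three adds that appear in
the generated 1x4-acc4 kernel below:

    vo0p0 = _mm_add_ps(vo0p0, vo0p1);
    vo0p2 = _mm_add_ps(vo0p2, vo0p3);
    vo0p0 = _mm_add_ps(vo0p0, vo0p2);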
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc2.c
similarity index 74%
rename from src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
rename to src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc2.c
index 3b8aea7..e56b4f7 100644
--- a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc2.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
@@ -11,7 +15,7 @@
#include <xnnpack/math.h>
-void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3(
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
@@ -52,10 +56,12 @@
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ float* o0 = output;
+
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
- if XNN_UNPREDICTABLE(padded_input_height <= 3) {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
@@ -65,16 +71,13 @@
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
@@ -85,17 +88,17 @@
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
@@ -105,30 +108,27 @@
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
- _mm_storeu_ps(output, vo);
- output += 4;
+ vo0 = _mm_min_ps(vo0, vmax);
+
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
@@ -138,45 +138,50 @@
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
- _mm_storeu_ps(output, vo);
- output += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
- _mm_storel_pi((__m64*) output, vo);
- output += 2;
- vo = _mm_movehl_ps(vo, vo);
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
- _mm_store_ss(output, vo);
- output += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
}
}
}
@@ -185,6 +190,7 @@
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
+
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc3.c
similarity index 74%
copy from src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
copy to src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc3.c
index 3b8aea7..5854f4f 100644
--- a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc3.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
@@ -52,10 +56,12 @@
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ float* o0 = output;
+
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
- if XNN_UNPREDICTABLE(padded_input_height <= 3) {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
@@ -65,16 +71,13 @@
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
@@ -85,17 +88,17 @@
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ __m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
@@ -105,30 +108,28 @@
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x7BDF, vk20));
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p2);
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
- _mm_storeu_ps(output, vo);
- output += 4;
+ vo0 = _mm_min_ps(vo0, vmax);
+
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
@@ -138,45 +139,51 @@
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ __m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x7BDF, vk20));
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p2);
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
- _mm_storeu_ps(output, vo);
- output += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
- _mm_storel_pi((__m64*) output, vo);
- output += 2;
- vo = _mm_movehl_ps(vo, vo);
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
- _mm_store_ss(output, vo);
- output += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
}
}
}
@@ -185,6 +192,7 @@
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
+
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc4.c
similarity index 73%
copy from src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
copy to src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc4.c
index 3b8aea7..f98d126 100644
--- a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4-acc4.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
@@ -11,7 +15,7 @@
#include <xnnpack/math.h>
-void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3(
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
@@ -52,10 +56,12 @@
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ float* o0 = output;
+
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
- if XNN_UNPREDICTABLE(padded_input_height <= 3) {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
@@ -65,16 +71,13 @@
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
@@ -85,17 +88,17 @@
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ __m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ __m128 vo0p3 = _mm_mul_ps(vi0x9BDF, vk02);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
@@ -105,30 +108,29 @@
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+ vo0p2 = _mm_add_ps(vo0p2, vo0p3);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p2);
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
- _mm_storeu_ps(output, vo);
- output += 4;
+ vo0 = _mm_min_ps(vo0, vmax);
+
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
@@ -138,45 +140,52 @@
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ __m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ __m128 vo0p3 = _mm_mul_ps(vi0x9BDF, vk02);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+ vo0p2 = _mm_add_ps(vo0p2, vo0p3);
+ vo0p0 = _mm_add_ps(vo0p0, vo0p2);
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
- _mm_storeu_ps(output, vo);
- output += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
- _mm_storel_pi((__m64*) output, vo);
- output += 2;
- vo = _mm_movehl_ps(vo, vo);
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
- _mm_store_ss(output, vo);
- output += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
}
}
}
@@ -185,6 +194,7 @@
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
+
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4.c
similarity index 74%
copy from src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
copy to src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4.c
index 3b8aea7..62f8633 100644
--- a/src/f32-dwconv2d-chw/3x3s2p1-sse-1x4-acc3.c
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-1x4.c
@@ -1,4 +1,8 @@
-// Copyright 2019 Google LLC
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
@@ -11,7 +15,7 @@
#include <xnnpack/math.h>
-void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3(
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4(
size_t input_height,
size_t input_width,
const float* input,
@@ -52,10 +56,12 @@
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ float* o0 = output;
+
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
- if XNN_UNPREDICTABLE(padded_input_height <= 3) {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
@@ -65,16 +71,13 @@
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
@@ -85,17 +88,17 @@
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
@@ -105,30 +108,26 @@
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
- _mm_storeu_ps(output, vo);
- output += 4;
+ vo0 = _mm_min_ps(vo0, vmax);
+
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
- __m128 vo8ACEp0 = vbias;
-
const __m128 vi0x89AB = _mm_loadu_ps(i0);
- const __m128 vi1x89AB = _mm_loadu_ps(i1);
- const __m128 vi2x89AB = _mm_loadu_ps(i2);
-
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
@@ -138,45 +137,49 @@
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x8ACE, vk01));
- __m128 vo8ACEp1 = _mm_mul_ps(vi1x8ACE, vk11);
- __m128 vo8ACEp2 = _mm_mul_ps(vi2x8ACE, vk21);
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x9BDF, vk02));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x9BDF, vk12));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x9BDF, vk22));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
- vo8ACEp0 = _mm_add_ps(vo8ACEp0, _mm_mul_ps(vi0x7BDF, vk00));
- vo8ACEp1 = _mm_add_ps(vo8ACEp1, _mm_mul_ps(vi1x7BDF, vk10));
- vo8ACEp2 = _mm_add_ps(vo8ACEp2, _mm_mul_ps(vi2x7BDF, vk20));
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
- __m128 vo = _mm_add_ps(vo8ACEp0, vo8ACEp1);
- vo = _mm_add_ps(vo, vo8ACEp2);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
- vo = _mm_max_ps(vo, vmin);
- vo = _mm_min_ps(vo, vmax);
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
- _mm_storeu_ps(output, vo);
- output += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
- _mm_storel_pi((__m64*) output, vo);
- output += 2;
- vo = _mm_movehl_ps(vo, vo);
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
- _mm_store_ss(output, vo);
- output += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
}
}
}
@@ -185,6 +188,7 @@
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
+
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4-acc2.c
new file mode 100644
index 0000000..af74f94
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4-acc2.c
@@ -0,0 +1,277 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+ const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+ float* o0 = output;
+ float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
+ i2 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 5) {
+ i3 = zero;
+ o1 = o0;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 6) {
+ i4 = zero;
+ }
+
+ __m128 vi0x7531 = _mm_setzero_ps();
+ __m128 vi1x7531 = _mm_setzero_ps();
+ __m128 vi2x7531 = _mm_setzero_ps();
+ __m128 vi3x7531 = _mm_setzero_ps();
+ __m128 vi4x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ i2 += 8;
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ i3 += 8;
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ i4 += 8;
+
+ const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ __m128 vo1p1 = _mm_mul_ps(vi3x8ACE, vk11);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi4x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+ vo1p0 = _mm_add_ps(vo1p0, vo1p1);
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+
+ const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi3x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi3x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi4x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi4x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
+ __m128 vo1p1 = _mm_mul_ps(vi3x8ACE, vk11);
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi4x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+
+ vo0p0 = _mm_add_ps(vo0p0, vo0p1);
+ vo1p0 = _mm_add_ps(vo1p0, vo1p1);
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ _mm_storel_pi((__m64*) o1, vo1);
+ o1 += 2;
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
+ vo1 = _mm_movehl_ps(vo1, vo1);
+ }
+ if (w & (2 * sizeof(float))) {
+ _mm_store_ss(o1, vo1);
+ o1 += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+ i1 = (const float*) ((uintptr_t) i0 + input_width);
+ i2 = (const float*) ((uintptr_t) i1 + input_width);
+ i3 = (const float*) ((uintptr_t) i2 + input_width);
+ i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+ o0 = o1;
+ o1 = (float*) ((uintptr_t) o0 + output_width);
+
+ output_height = doz(output_height, 2);
+ padded_input_height = doz(padded_input_height, 4);
+ } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4.c
new file mode 100644
index 0000000..7a86c18
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-2x4.c
@@ -0,0 +1,273 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+ const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+ float* o0 = output;
+ float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
+ i2 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 5) {
+ i3 = zero;
+ o1 = o0;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 6) {
+ i4 = zero;
+ }
+
+ __m128 vi0x7531 = _mm_setzero_ps();
+ __m128 vi1x7531 = _mm_setzero_ps();
+ __m128 vi2x7531 = _mm_setzero_ps();
+ __m128 vi3x7531 = _mm_setzero_ps();
+ __m128 vi4x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ i2 += 8;
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ i3 += 8;
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ i4 += 8;
+
+ const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+
+ const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi3x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi3x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi4x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi4x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ _mm_storel_pi((__m64*) o1, vo1);
+ o1 += 2;
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
+ vo1 = _mm_movehl_ps(vo1, vo1);
+ }
+ if (w & (2 * sizeof(float))) {
+ _mm_store_ss(o1, vo1);
+ o1 += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+ i1 = (const float*) ((uintptr_t) i0 + input_width);
+ i2 = (const float*) ((uintptr_t) i1 + input_width);
+ i3 = (const float*) ((uintptr_t) i2 + input_width);
+ i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+ o0 = o1;
+ o1 = (float*) ((uintptr_t) o0 + output_width);
+
+ output_height = doz(output_height, 2);
+ padded_input_height = doz(padded_input_height, 4);
+ } while (output_height != 0);
+}
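
The stride-2 subsampling in these kernels happens entirely in registers: each iteration loads eight consecutive pixels as two __m128 vectors and splits them into even-indexed pixels (the window centers, multiplied by vk*1) and odd-indexed pixels (the right neighbors, multiplied by vk*2) with one pair of _mm_shuffle_ps selectors. A standalone sketch of just that deinterleave step, with pixel values chosen to match the 8..F lane naming used above:

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  const float row[8] = {8, 9, 10, 11, 12, 13, 14, 15};  // pixels 8..F
  const __m128 v89AB = _mm_loadu_ps(row);
  const __m128 vCDEF = _mm_loadu_ps(row + 4);
  // SHUFFLE(2, 0, 2, 0) keeps lanes 0 and 2 of each operand: pixels 8,A,C,E.
  const __m128 v8ACE = _mm_shuffle_ps(v89AB, vCDEF, _MM_SHUFFLE(2, 0, 2, 0));
  // SHUFFLE(3, 1, 3, 1) keeps lanes 1 and 3 of each operand: pixels 9,B,D,F.
  const __m128 v9BDF = _mm_shuffle_ps(v89AB, vCDEF, _MM_SHUFFLE(3, 1, 3, 1));
  float even[4], odd[4];
  _mm_storeu_ps(even, v8ACE);
  _mm_storeu_ps(odd, v9BDF);
  printf("even: %g %g %g %g\n", even[0], even[1], even[2], even[3]);  // 8 10 12 14
  printf("odd:  %g %g %g %g\n", odd[0], odd[1], odd[2], odd[3]);      // 9 11 13 15
  return 0;
}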
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-3x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-3x4.c
new file mode 100644
index 0000000..879cfb1
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-3x4.c
@@ -0,0 +1,349 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+ const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+ const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+ const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+ float* o0 = output;
+ float* o1 = (float*) ((uintptr_t) o0 + output_width);
+ float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
+ i2 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 5) {
+ i3 = zero;
+ o1 = o0;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 6) {
+ i4 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 7) {
+ i5 = zero;
+ o2 = o1;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 8) {
+ i6 = zero;
+ }
+
+ __m128 vi0x7531 = _mm_setzero_ps();
+ __m128 vi1x7531 = _mm_setzero_ps();
+ __m128 vi2x7531 = _mm_setzero_ps();
+ __m128 vi3x7531 = _mm_setzero_ps();
+ __m128 vi4x7531 = _mm_setzero_ps();
+ __m128 vi5x7531 = _mm_setzero_ps();
+ __m128 vi6x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ i2 += 8;
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ i3 += 8;
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ i4 += 8;
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ i5 += 8;
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ i6 += 8;
+
+ const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi5x8ACE = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi5x9BDF = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi6x8ACE = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi6x9BDF = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+
+ const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi3x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi3x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi4x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi4x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi5x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi5x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi6x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi6x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ _mm_storel_pi((__m64*) o2, vo2);
+ o2 += 2;
+ _mm_storel_pi((__m64*) o1, vo1);
+ o1 += 2;
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
+ vo1 = _mm_movehl_ps(vo1, vo1);
+ vo2 = _mm_movehl_ps(vo2, vo2);
+ }
+ if (w & (2 * sizeof(float))) {
+ _mm_store_ss(o2, vo2);
+ o2 += 1;
+ _mm_store_ss(o1, vo1);
+ o1 += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+ i1 = (const float*) ((uintptr_t) i0 + input_width);
+ i2 = (const float*) ((uintptr_t) i1 + input_width);
+ i3 = (const float*) ((uintptr_t) i2 + input_width);
+ i4 = (const float*) ((uintptr_t) i3 + input_width);
+ i5 = (const float*) ((uintptr_t) i4 + input_width);
+ i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+ o0 = o2;
+ o1 = (float*) ((uintptr_t) o0 + output_width);
+ o2 = (float*) ((uintptr_t) o1 + output_width);
+
+ output_height = doz(output_height, 3);
+ padded_input_height = doz(padded_input_height, 6);
+ } while (output_height != 0);
+}
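
The remaining operand, the left neighbors multiplied by vk*0, is built without touching memory again: rotating the odd-pixel vector {9,B,D,F} right by one lane gives {F,9,B,D}, and _mm_move_ss then overwrites the stale F in lane 0 with pixel 7, carried across loop iterations in the vi*x7531 registers (zero-initialized on entry, which supplies the implicit left padding). A standalone sketch of that splice:

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  const __m128 v9BDF = _mm_setr_ps(9.0f, 11.0f, 13.0f, 15.0f);  // pixels 9,B,D,F
  const __m128 v7531 = _mm_setr_ps(7.0f, 1.0f, 3.0f, 5.0f);     // lane 0 carries pixel 7
  // Rotate right by one lane: {9,B,D,F} -> {F,9,B,D}.
  const __m128 vF9BD = _mm_shuffle_ps(v9BDF, v9BDF, _MM_SHUFFLE(2, 1, 0, 3));
  // Replace lane 0 (the stale F) with pixel 7 from the previous block,
  // yielding the left neighbors {7,9,B,D} of the centers {8,A,C,E}.
  const __m128 v7BDF = _mm_move_ss(vF9BD, v7531);
  float out[4];
  _mm_storeu_ps(out, v7BDF);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 7 9 11 13
  return 0;
}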
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-4x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-4x4.c
new file mode 100644
index 0000000..7e5b10a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-4x4.c
@@ -0,0 +1,425 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+ const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+ const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+ const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+ const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+ const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+ float* o0 = output;
+ float* o1 = (float*) ((uintptr_t) o0 + output_width);
+ float* o2 = (float*) ((uintptr_t) o1 + output_width);
+ float* o3 = (float*) ((uintptr_t) o2 + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
+ i2 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 5) {
+ i3 = zero;
+ o1 = o0;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 6) {
+ i4 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 7) {
+ i5 = zero;
+ o2 = o1;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 8) {
+ i6 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 9) {
+ i7 = zero;
+ o3 = o2;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 10) {
+ i8 = zero;
+ }
+
+ __m128 vi0x7531 = _mm_setzero_ps();
+ __m128 vi1x7531 = _mm_setzero_ps();
+ __m128 vi2x7531 = _mm_setzero_ps();
+ __m128 vi3x7531 = _mm_setzero_ps();
+ __m128 vi4x7531 = _mm_setzero_ps();
+ __m128 vi5x7531 = _mm_setzero_ps();
+ __m128 vi6x7531 = _mm_setzero_ps();
+ __m128 vi7x7531 = _mm_setzero_ps();
+ __m128 vi8x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ i2 += 8;
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ i3 += 8;
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ i4 += 8;
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ i5 += 8;
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ i6 += 8;
+ const __m128 vi7x89AB = _mm_loadu_ps(i7);
+ const __m128 vi7xCDEF = _mm_loadu_ps(i7 + 4);
+ i7 += 8;
+ const __m128 vi8x89AB = _mm_loadu_ps(i8);
+ const __m128 vi8xCDEF = _mm_loadu_ps(i8 + 4);
+ i8 += 8;
+
+ const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi5x8ACE = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi5x9BDF = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi6x8ACE = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi6x9BDF = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi7x8ACE = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi7x9BDF = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi8x8ACE = _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi8x9BDF = _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi6x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi7xF9BD = _mm_shuffle_ps(vi7x9BDF, vi7x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi8xF9BD = _mm_shuffle_ps(vi8x9BDF, vi8x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+ const __m128 vi7x7BDF = _mm_move_ss(vi7xF9BD, vi7x7531);
+ const __m128 vi8x7BDF = _mm_move_ss(vi8xF9BD, vi8x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+ vi7x7531 = vi7xF9BD;
+ vi8x7531 = vi8xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+ __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+ vo3 = _mm_min_ps(vo3, vmax);
+
+ _mm_storeu_ps(o3, vo3);
+ o3 += 4;
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ const __m128 vi7x89AB = _mm_loadu_ps(i7);
+ const __m128 vi7xCDEF = _mm_loadu_ps(i7 + 4);
+ const __m128 vi8x89AB = _mm_loadu_ps(i8);
+ const __m128 vi8xCDEF = _mm_loadu_ps(i8 + 4);
+
+ const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi3x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi3x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi4x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi4x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi5x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi5x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi6x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi6x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi7x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi7x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi8x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi8x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi6x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi7xF9BD = _mm_shuffle_ps(vi7x9BDF, vi7x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi8xF9BD = _mm_shuffle_ps(vi8x9BDF, vi8x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+ const __m128 vi7x7BDF = _mm_move_ss(vi7xF9BD, vi7x7531);
+ const __m128 vi8x7BDF = _mm_move_ss(vi8xF9BD, vi8x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+ vi7x7531 = vi7xF9BD;
+ vi8x7531 = vi8xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+ __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+ vo3 = _mm_min_ps(vo3, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ _mm_storeu_ps(o3, vo3);
+ o3 += 4;
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ _mm_storel_pi((__m64*) o3, vo3);
+ o3 += 2;
+ _mm_storel_pi((__m64*) o2, vo2);
+ o2 += 2;
+ _mm_storel_pi((__m64*) o1, vo1);
+ o1 += 2;
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
+ vo1 = _mm_movehl_ps(vo1, vo1);
+ vo2 = _mm_movehl_ps(vo2, vo2);
+ vo3 = _mm_movehl_ps(vo3, vo3);
+ }
+ if (w & (2 * sizeof(float))) {
+ _mm_store_ss(o3, vo3);
+ o3 += 1;
+ _mm_store_ss(o2, vo2);
+ o2 += 1;
+ _mm_store_ss(o1, vo1);
+ o1 += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i8 - input_decrement);
+ i1 = (const float*) ((uintptr_t) i0 + input_width);
+ i2 = (const float*) ((uintptr_t) i1 + input_width);
+ i3 = (const float*) ((uintptr_t) i2 + input_width);
+ i4 = (const float*) ((uintptr_t) i3 + input_width);
+ i5 = (const float*) ((uintptr_t) i4 + input_width);
+ i6 = (const float*) ((uintptr_t) i5 + input_width);
+ i7 = (const float*) ((uintptr_t) i6 + input_width);
+ i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+ o0 = o3;
+ o1 = (float*) ((uintptr_t) o0 + output_width);
+ o2 = (float*) ((uintptr_t) o1 + output_width);
+ o3 = (float*) ((uintptr_t) o2 + output_width);
+
+ output_height = doz(output_height, 4);
+ padded_input_height = doz(padded_input_height, 8);
+ } while (output_height != 0);
+}
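
In the remainder path, w holds the leftover input pixels in bytes (1 to 7 floats); with the implicit right padding those yield (w / sizeof(float) + 1) / 2 output pixels. Seven leftover inputs round up to a full vector of four outputs, hence the dedicated w == 7 * sizeof(float) store. Otherwise the kernel adds one float to w, after which the tests against 4 * sizeof(float) and 2 * sizeof(float) are equivalent to testing bits 1 and 0 of the output count: store two lanes, shift the high pair down with _mm_movehl_ps, then store one. A sketch of the same logic for a single output vector, where store_tail and remainder are illustrative names:

#include <stdio.h>
#include <xmmintrin.h>

// Stores the first (remainder + 1) / 2 lanes of vo, mirroring the bit
// tests in the kernels above for output counts 1..3 (a count of 4 takes
// the full _mm_storeu_ps branch instead).
static void store_tail(float* o, __m128 vo, size_t remainder /* input pixels, 1..6 */) {
  const size_t outputs = (remainder + 1) / 2;  // 1..3
  if (outputs & 2) {                 // kernel: w & (4 * sizeof(float))
    _mm_storel_pi((__m64*) o, vo);   // store lanes 0..1
    o += 2;
    vo = _mm_movehl_ps(vo, vo);      // shift lanes 2..3 down to 0..1
  }
  if (outputs & 1) {                 // kernel: w & (2 * sizeof(float))
    _mm_store_ss(o, vo);             // store one remaining lane
  }
}

int main(void) {
  float out[4] = {0, 0, 0, 0};
  store_tail(out, _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f), 5);  // 5 inputs -> 3 outputs
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1 2 3 0
  return 0;
}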
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-5x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-5x4.c
new file mode 100644
index 0000000..29e33a8
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-5x4.c
@@ -0,0 +1,501 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_5x4(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+ const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+ const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+ const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+ const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+ const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+ const float* i9 = (const float*) ((uintptr_t) i8 + input_width);
+ const float* i10 = (const float*) ((uintptr_t) i9 + input_width);
+
+ float* o0 = output;
+ float* o1 = (float*) ((uintptr_t) o0 + output_width);
+ float* o2 = (float*) ((uintptr_t) o1 + output_width);
+ float* o3 = (float*) ((uintptr_t) o2 + output_width);
+ float* o4 = (float*) ((uintptr_t) o3 + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
+ i2 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 5) {
+ i3 = zero;
+ o1 = o0;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 6) {
+ i4 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 7) {
+ i5 = zero;
+ o2 = o1;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 8) {
+ i6 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 9) {
+ i7 = zero;
+ o3 = o2;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 10) {
+ i8 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 11) {
+ i9 = zero;
+ o4 = o3;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 12) {
+ i10 = zero;
+ }
+
+ __m128 vi0x7531 = _mm_setzero_ps();
+ __m128 vi1x7531 = _mm_setzero_ps();
+ __m128 vi2x7531 = _mm_setzero_ps();
+ __m128 vi3x7531 = _mm_setzero_ps();
+ __m128 vi4x7531 = _mm_setzero_ps();
+ __m128 vi5x7531 = _mm_setzero_ps();
+ __m128 vi6x7531 = _mm_setzero_ps();
+ __m128 vi7x7531 = _mm_setzero_ps();
+ __m128 vi8x7531 = _mm_setzero_ps();
+ __m128 vi9x7531 = _mm_setzero_ps();
+ __m128 vi10x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ i2 += 8;
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ i3 += 8;
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ i4 += 8;
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ i5 += 8;
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ i6 += 8;
+ const __m128 vi7x89AB = _mm_loadu_ps(i7);
+ const __m128 vi7xCDEF = _mm_loadu_ps(i7 + 4);
+ i7 += 8;
+ const __m128 vi8x89AB = _mm_loadu_ps(i8);
+ const __m128 vi8xCDEF = _mm_loadu_ps(i8 + 4);
+ i8 += 8;
+ const __m128 vi9x89AB = _mm_loadu_ps(i9);
+ const __m128 vi9xCDEF = _mm_loadu_ps(i9 + 4);
+ i9 += 8;
+ const __m128 vi10x89AB = _mm_loadu_ps(i10);
+ const __m128 vi10xCDEF = _mm_loadu_ps(i10 + 4);
+ i10 += 8;
+
+ const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi5x8ACE = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi5x9BDF = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi6x8ACE = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi6x9BDF = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi7x8ACE = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi7x9BDF = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi8x8ACE = _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi8x9BDF = _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi9x8ACE = _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi9x9BDF = _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi10x8ACE = _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi10x9BDF = _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi6x8ACE, vk01));
+ __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi8x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x8ACE, vk11));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x8ACE, vk21));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi7xF9BD = _mm_shuffle_ps(vi7x9BDF, vi7x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi8xF9BD = _mm_shuffle_ps(vi8x9BDF, vi8x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi9xF9BD = _mm_shuffle_ps(vi9x9BDF, vi9x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi10xF9BD = _mm_shuffle_ps(vi10x9BDF, vi10x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x9BDF, vk02));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x9BDF, vk12));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x9BDF, vk22));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+ const __m128 vi7x7BDF = _mm_move_ss(vi7xF9BD, vi7x7531);
+ const __m128 vi8x7BDF = _mm_move_ss(vi8xF9BD, vi8x7531);
+ const __m128 vi9x7BDF = _mm_move_ss(vi9xF9BD, vi9x7531);
+ const __m128 vi10x7BDF = _mm_move_ss(vi10xF9BD, vi10x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+ vi7x7531 = vi7xF9BD;
+ vi8x7531 = vi8xF9BD;
+ vi9x7531 = vi9xF9BD;
+ vi10x7531 = vi10xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x7BDF, vk00));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x7BDF, vk10));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x7BDF, vk20));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+ __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+ __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+ vo3 = _mm_min_ps(vo3, vmax);
+ vo4 = _mm_min_ps(vo4, vmax);
+
+ _mm_storeu_ps(o4, vo4);
+ o4 += 4;
+ _mm_storeu_ps(o3, vo3);
+ o3 += 4;
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ const __m128 vi7x89AB = _mm_loadu_ps(i7);
+ const __m128 vi7xCDEF = _mm_loadu_ps(i7 + 4);
+ const __m128 vi8x89AB = _mm_loadu_ps(i8);
+ const __m128 vi8xCDEF = _mm_loadu_ps(i8 + 4);
+ const __m128 vi9x89AB = _mm_loadu_ps(i9);
+ const __m128 vi9xCDEF = _mm_loadu_ps(i9 + 4);
+ const __m128 vi10x89AB = _mm_loadu_ps(i10);
+ const __m128 vi10xCDEF = _mm_loadu_ps(i10 + 4);
+
+ const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi3x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi3x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi4x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi4x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi5x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi5x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi6x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi6x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi7x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi7x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi8x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi8x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi9x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi9x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi10x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi10x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi6x8ACE, vk01));
+ __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi8x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x8ACE, vk11));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x8ACE, vk21));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi7xF9BD = _mm_shuffle_ps(vi7x9BDF, vi7x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi8xF9BD = _mm_shuffle_ps(vi8x9BDF, vi8x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi9xF9BD = _mm_shuffle_ps(vi9x9BDF, vi9x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi10xF9BD = _mm_shuffle_ps(vi10x9BDF, vi10x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x9BDF, vk02));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x9BDF, vk12));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x9BDF, vk22));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+ const __m128 vi7x7BDF = _mm_move_ss(vi7xF9BD, vi7x7531);
+ const __m128 vi8x7BDF = _mm_move_ss(vi8xF9BD, vi8x7531);
+ const __m128 vi9x7BDF = _mm_move_ss(vi9xF9BD, vi9x7531);
+ const __m128 vi10x7BDF = _mm_move_ss(vi10xF9BD, vi10x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+ vi7x7531 = vi7xF9BD;
+ vi8x7531 = vi8xF9BD;
+ vi9x7531 = vi9xF9BD;
+ vi10x7531 = vi10xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x7BDF, vk00));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x7BDF, vk10));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x7BDF, vk20));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+ __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+ __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+ vo3 = _mm_min_ps(vo3, vmax);
+ vo4 = _mm_min_ps(vo4, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ _mm_storeu_ps(o4, vo4);
+ o4 += 4;
+ _mm_storeu_ps(o3, vo3);
+ o3 += 4;
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ _mm_storel_pi((__m64*) o4, vo4);
+ o4 += 2;
+ _mm_storel_pi((__m64*) o3, vo3);
+ o3 += 2;
+ _mm_storel_pi((__m64*) o2, vo2);
+ o2 += 2;
+ _mm_storel_pi((__m64*) o1, vo1);
+ o1 += 2;
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
+ vo1 = _mm_movehl_ps(vo1, vo1);
+ vo2 = _mm_movehl_ps(vo2, vo2);
+ vo3 = _mm_movehl_ps(vo3, vo3);
+ vo4 = _mm_movehl_ps(vo4, vo4);
+ }
+ if (w & (2 * sizeof(float))) {
+ _mm_store_ss(o4, vo4);
+ o4 += 1;
+ _mm_store_ss(o3, vo3);
+ o3 += 1;
+ _mm_store_ss(o2, vo2);
+ o2 += 1;
+ _mm_store_ss(o1, vo1);
+ o1 += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i10 - input_decrement);
+ i1 = (const float*) ((uintptr_t) i0 + input_width);
+ i2 = (const float*) ((uintptr_t) i1 + input_width);
+ i3 = (const float*) ((uintptr_t) i2 + input_width);
+ i4 = (const float*) ((uintptr_t) i3 + input_width);
+ i5 = (const float*) ((uintptr_t) i4 + input_width);
+ i6 = (const float*) ((uintptr_t) i5 + input_width);
+ i7 = (const float*) ((uintptr_t) i6 + input_width);
+ i8 = (const float*) ((uintptr_t) i7 + input_width);
+ i9 = (const float*) ((uintptr_t) i8 + input_width);
+ i10 = (const float*) ((uintptr_t) i9 + input_width);
+
+ o0 = o4;
+ o1 = (float*) ((uintptr_t) o0 + output_width);
+ o2 = (float*) ((uintptr_t) o1 + output_width);
+ o3 = (float*) ((uintptr_t) o2 + output_width);
+ o4 = (float*) ((uintptr_t) o3 + output_width);
+
+ output_height = doz(output_height, 5);
+ padded_input_height = doz(padded_input_height, 10);
+ } while (output_height != 0);
+}
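The shuffle sequence above is the heart of every variant in this patch, so a distilled view may help. Each block of 8 consecutive pixels (labeled 8..F) is split into even lanes {8,A,C,E}, the stride-2 output centers, and odd lanes {9,B,D,F}, their right neighbors. Rotating the odd vector and splicing in the last odd pixel of the previous block via _mm_move_ss yields the left neighbors {7,9,B,D} without any unaligned reload. A minimal standalone sketch of just that step (values and names are illustrative, not library code):

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  // One block of 8 consecutive pixels, labeled 8..F as in the kernels above.
  const float row[8] = {8, 9, 10, 11, 12, 13, 14, 15};
  const __m128 v89AB = _mm_loadu_ps(row);
  const __m128 vCDEF = _mm_loadu_ps(row + 4);
  // Lane 0 carries the last odd pixel (7) of the previous block, like vi*x7531.
  const __m128 v7531 = _mm_set_ss(7.0f);

  // Even pixels (the output centers) and odd pixels (their right neighbors).
  const __m128 v8ACE = _mm_shuffle_ps(v89AB, vCDEF, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 v9BDF = _mm_shuffle_ps(v89AB, vCDEF, _MM_SHUFFLE(3, 1, 3, 1));
  (void) v8ACE;  // unused in this sketch; the kernels multiply it by vk*1

  // Rotate {9,B,D,F} to {F,9,B,D}, then overwrite lane 0 with pixel 7:
  // the result {7,9,B,D} is the left-neighbor vector for centers {8,A,C,E}.
  const __m128 vF9BD = _mm_shuffle_ps(v9BDF, v9BDF, _MM_SHUFFLE(2, 1, 0, 3));
  const __m128 vleft = _mm_move_ss(vF9BD, v7531);

  float out[4];
  _mm_storeu_ps(out, vleft);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // prints: 7 9 11 13
  return 0;
}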
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-6x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-6x4.c
new file mode 100644
index 0000000..d9d976f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-sse-6x4.c
@@ -0,0 +1,577 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_6x4(
+ size_t input_height,
+ size_t input_width,
+ const float* input,
+ const float* weights,
+ const float* zero,
+ float* output,
+ uint32_t padding_top,
+ const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(input_height != 0);
+ assert(input_width != 0);
+ assert(input_width % sizeof(float) == 0);
+ assert(padding_top >= 0);
+ assert(padding_top <= 1);
+
+ const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
+ const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
+ const __m128 vmax = _mm_load_ps(params->sse.max);
+ const __m128 vmin = _mm_load_ps(params->sse.min);
+
+ const __m128 vbias = _mm_load1_ps(weights);
+ const __m128 vk00 = _mm_load1_ps(weights + 1);
+ const __m128 vk01 = _mm_load1_ps(weights + 2);
+ const __m128 vk02 = _mm_load1_ps(weights + 3);
+ const __m128 vk10 = _mm_load1_ps(weights + 4);
+ const __m128 vk11 = _mm_load1_ps(weights + 5);
+ const __m128 vk12 = _mm_load1_ps(weights + 6);
+ const __m128 vk20 = _mm_load1_ps(weights + 7);
+ const __m128 vk21 = _mm_load1_ps(weights + 8);
+ const __m128 vk22 = _mm_load1_ps(weights + 9);
+
+ const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+ const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+ const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+ const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+ if XNN_UNPREDICTABLE(padding_top != 0) {
+ i0 = zero;
+ }
+ const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+ const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+ const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+ const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+ const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+ const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+ const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+ const float* i9 = (const float*) ((uintptr_t) i8 + input_width);
+ const float* i10 = (const float*) ((uintptr_t) i9 + input_width);
+ const float* i11 = (const float*) ((uintptr_t) i10 + input_width);
+ const float* i12 = (const float*) ((uintptr_t) i11 + input_width);
+
+ float* o0 = output;
+ float* o1 = (float*) ((uintptr_t) o0 + output_width);
+ float* o2 = (float*) ((uintptr_t) o1 + output_width);
+ float* o3 = (float*) ((uintptr_t) o2 + output_width);
+ float* o4 = (float*) ((uintptr_t) o3 + output_width);
+ float* o5 = (float*) ((uintptr_t) o4 + output_width);
+
+ size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+ size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+ do {
+ if XNN_UNPREDICTABLE(padded_input_height < 4) {
+ i2 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 5) {
+ i3 = zero;
+ o1 = o0;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 6) {
+ i4 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 7) {
+ i5 = zero;
+ o2 = o1;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 8) {
+ i6 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 9) {
+ i7 = zero;
+ o3 = o2;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 10) {
+ i8 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 11) {
+ i9 = zero;
+ o4 = o3;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 12) {
+ i10 = zero;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 13) {
+ i11 = zero;
+ o5 = o4;
+ }
+ if XNN_UNPREDICTABLE(padded_input_height < 14) {
+ i12 = zero;
+ }
+
+ __m128 vi0x7531 = _mm_setzero_ps();
+ __m128 vi1x7531 = _mm_setzero_ps();
+ __m128 vi2x7531 = _mm_setzero_ps();
+ __m128 vi3x7531 = _mm_setzero_ps();
+ __m128 vi4x7531 = _mm_setzero_ps();
+ __m128 vi5x7531 = _mm_setzero_ps();
+ __m128 vi6x7531 = _mm_setzero_ps();
+ __m128 vi7x7531 = _mm_setzero_ps();
+ __m128 vi8x7531 = _mm_setzero_ps();
+ __m128 vi9x7531 = _mm_setzero_ps();
+ __m128 vi10x7531 = _mm_setzero_ps();
+ __m128 vi11x7531 = _mm_setzero_ps();
+ __m128 vi12x7531 = _mm_setzero_ps();
+
+ size_t w = input_width;
+ for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ i0 += 8;
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ i1 += 8;
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ i2 += 8;
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ i3 += 8;
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ i4 += 8;
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ i5 += 8;
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ i6 += 8;
+ const __m128 vi7x89AB = _mm_loadu_ps(i7);
+ const __m128 vi7xCDEF = _mm_loadu_ps(i7 + 4);
+ i7 += 8;
+ const __m128 vi8x89AB = _mm_loadu_ps(i8);
+ const __m128 vi8xCDEF = _mm_loadu_ps(i8 + 4);
+ i8 += 8;
+ const __m128 vi9x89AB = _mm_loadu_ps(i9);
+ const __m128 vi9xCDEF = _mm_loadu_ps(i9 + 4);
+ i9 += 8;
+ const __m128 vi10x89AB = _mm_loadu_ps(i10);
+ const __m128 vi10xCDEF = _mm_loadu_ps(i10 + 4);
+ i10 += 8;
+ const __m128 vi11x89AB = _mm_loadu_ps(i11);
+ const __m128 vi11xCDEF = _mm_loadu_ps(i11 + 4);
+ i11 += 8;
+ const __m128 vi12x89AB = _mm_loadu_ps(i12);
+ const __m128 vi12xCDEF = _mm_loadu_ps(i12 + 4);
+ i12 += 8;
+
+ const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi5x8ACE = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi5x9BDF = _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi6x8ACE = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi6x9BDF = _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi7x8ACE = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi7x9BDF = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi8x8ACE = _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi8x9BDF = _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi9x8ACE = _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi9x9BDF = _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi10x8ACE = _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi10x9BDF = _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi11x8ACE = _mm_shuffle_ps(vi11x89AB, vi11xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi11x9BDF = _mm_shuffle_ps(vi11x89AB, vi11xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+ const __m128 vi12x8ACE = _mm_shuffle_ps(vi12x89AB, vi12xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
+ const __m128 vi12x9BDF = _mm_shuffle_ps(vi12x89AB, vi12xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi6x8ACE, vk01));
+ __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi8x8ACE, vk01));
+ __m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi10x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x8ACE, vk11));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x8ACE, vk11));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi11x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x8ACE, vk21));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x8ACE, vk21));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi12x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi7xF9BD = _mm_shuffle_ps(vi7x9BDF, vi7x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi8xF9BD = _mm_shuffle_ps(vi8x9BDF, vi8x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi9xF9BD = _mm_shuffle_ps(vi9x9BDF, vi9x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi10xF9BD = _mm_shuffle_ps(vi10x9BDF, vi10x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi11xF9BD = _mm_shuffle_ps(vi11x9BDF, vi11x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi12xF9BD = _mm_shuffle_ps(vi12x9BDF, vi12x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x9BDF, vk02));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x9BDF, vk02));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi10x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x9BDF, vk12));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x9BDF, vk12));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi11x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x9BDF, vk22));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x9BDF, vk22));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi12x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+ const __m128 vi7x7BDF = _mm_move_ss(vi7xF9BD, vi7x7531);
+ const __m128 vi8x7BDF = _mm_move_ss(vi8xF9BD, vi8x7531);
+ const __m128 vi9x7BDF = _mm_move_ss(vi9xF9BD, vi9x7531);
+ const __m128 vi10x7BDF = _mm_move_ss(vi10xF9BD, vi10x7531);
+ const __m128 vi11x7BDF = _mm_move_ss(vi11xF9BD, vi11x7531);
+ const __m128 vi12x7BDF = _mm_move_ss(vi12xF9BD, vi12x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+ vi7x7531 = vi7xF9BD;
+ vi8x7531 = vi8xF9BD;
+ vi9x7531 = vi9xF9BD;
+ vi10x7531 = vi10xF9BD;
+ vi11x7531 = vi11xF9BD;
+ vi12x7531 = vi12xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x7BDF, vk00));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x7BDF, vk00));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi10x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x7BDF, vk10));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x7BDF, vk10));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi11x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x7BDF, vk20));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x7BDF, vk20));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi12x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+ __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+ __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+ __m128 vo5 = _mm_max_ps(vo5p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+ vo3 = _mm_min_ps(vo3, vmax);
+ vo4 = _mm_min_ps(vo4, vmax);
+ vo5 = _mm_min_ps(vo5, vmax);
+
+ _mm_storeu_ps(o5, vo5);
+ o5 += 4;
+ _mm_storeu_ps(o4, vo4);
+ o4 += 4;
+ _mm_storeu_ps(o3, vo3);
+ o3 += 4;
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ }
+ // Potentially process the last block of 0..7 pixels.
+ assert(w < 8 * sizeof(float));
+ if XNN_LIKELY(w != 0) {
+ const __m128 vi0x89AB = _mm_loadu_ps(i0);
+ const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
+ const __m128 vi1x89AB = _mm_loadu_ps(i1);
+ const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
+ const __m128 vi2x89AB = _mm_loadu_ps(i2);
+ const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
+ const __m128 vi3x89AB = _mm_loadu_ps(i3);
+ const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
+ const __m128 vi4x89AB = _mm_loadu_ps(i4);
+ const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
+ const __m128 vi5x89AB = _mm_loadu_ps(i5);
+ const __m128 vi5xCDEF = _mm_loadu_ps(i5 + 4);
+ const __m128 vi6x89AB = _mm_loadu_ps(i6);
+ const __m128 vi6xCDEF = _mm_loadu_ps(i6 + 4);
+ const __m128 vi7x89AB = _mm_loadu_ps(i7);
+ const __m128 vi7xCDEF = _mm_loadu_ps(i7 + 4);
+ const __m128 vi8x89AB = _mm_loadu_ps(i8);
+ const __m128 vi8xCDEF = _mm_loadu_ps(i8 + 4);
+ const __m128 vi9x89AB = _mm_loadu_ps(i9);
+ const __m128 vi9xCDEF = _mm_loadu_ps(i9 + 4);
+ const __m128 vi10x89AB = _mm_loadu_ps(i10);
+ const __m128 vi10xCDEF = _mm_loadu_ps(i10 + 4);
+ const __m128 vi11x89AB = _mm_loadu_ps(i11);
+ const __m128 vi11xCDEF = _mm_loadu_ps(i11 + 4);
+ const __m128 vi12x89AB = _mm_loadu_ps(i12);
+ const __m128 vi12xCDEF = _mm_loadu_ps(i12 + 4);
+
+ const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi3x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi3x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi4x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi4x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi5x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi5x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi5x89AB, vi5xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi6x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi6x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi6x89AB, vi6xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi7x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi7x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi8x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi8x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi8x89AB, vi8xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi9x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi9x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi9x89AB, vi9xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi10x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi10x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi10x89AB, vi10xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi11x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi11x89AB, vi11xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi11x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi11x89AB, vi11xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+ const __m128 vi12x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi12x89AB, vi12xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128 vi12x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi12x89AB, vi12xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
+ __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x8ACE, vk01));
+ __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x8ACE, vk01));
+ __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi6x8ACE, vk01));
+ __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi8x8ACE, vk01));
+ __m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi10x8ACE, vk01));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x8ACE, vk11));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x8ACE, vk11));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x8ACE, vk11));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x8ACE, vk11));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi11x8ACE, vk11));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x8ACE, vk21));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x8ACE, vk21));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x8ACE, vk21));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x8ACE, vk21));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi12x8ACE, vk21));
+
+ const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi5xF9BD = _mm_shuffle_ps(vi5x9BDF, vi5x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi6xF9BD = _mm_shuffle_ps(vi6x9BDF, vi6x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi7xF9BD = _mm_shuffle_ps(vi7x9BDF, vi7x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi8xF9BD = _mm_shuffle_ps(vi8x9BDF, vi8x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi9xF9BD = _mm_shuffle_ps(vi9x9BDF, vi9x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi10xF9BD = _mm_shuffle_ps(vi10x9BDF, vi10x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi11xF9BD = _mm_shuffle_ps(vi11x9BDF, vi11x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+ const __m128 vi12xF9BD = _mm_shuffle_ps(vi12x9BDF, vi12x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x9BDF, vk02));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x9BDF, vk02));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x9BDF, vk02));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x9BDF, vk02));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi10x9BDF, vk02));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x9BDF, vk12));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x9BDF, vk12));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x9BDF, vk12));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x9BDF, vk12));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi11x9BDF, vk12));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x9BDF, vk22));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x9BDF, vk22));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x9BDF, vk22));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x9BDF, vk22));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi12x9BDF, vk22));
+
+ const __m128 vi0x7BDF = _mm_move_ss(vi0xF9BD, vi0x7531);
+ const __m128 vi1x7BDF = _mm_move_ss(vi1xF9BD, vi1x7531);
+ const __m128 vi2x7BDF = _mm_move_ss(vi2xF9BD, vi2x7531);
+ const __m128 vi3x7BDF = _mm_move_ss(vi3xF9BD, vi3x7531);
+ const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);
+ const __m128 vi5x7BDF = _mm_move_ss(vi5xF9BD, vi5x7531);
+ const __m128 vi6x7BDF = _mm_move_ss(vi6xF9BD, vi6x7531);
+ const __m128 vi7x7BDF = _mm_move_ss(vi7xF9BD, vi7x7531);
+ const __m128 vi8x7BDF = _mm_move_ss(vi8xF9BD, vi8x7531);
+ const __m128 vi9x7BDF = _mm_move_ss(vi9xF9BD, vi9x7531);
+ const __m128 vi10x7BDF = _mm_move_ss(vi10xF9BD, vi10x7531);
+ const __m128 vi11x7BDF = _mm_move_ss(vi11xF9BD, vi11x7531);
+ const __m128 vi12x7BDF = _mm_move_ss(vi12xF9BD, vi12x7531);
+
+ vi0x7531 = vi0xF9BD;
+ vi1x7531 = vi1xF9BD;
+ vi2x7531 = vi2xF9BD;
+ vi3x7531 = vi3xF9BD;
+ vi4x7531 = vi4xF9BD;
+ vi5x7531 = vi5xF9BD;
+ vi6x7531 = vi6xF9BD;
+ vi7x7531 = vi7xF9BD;
+ vi8x7531 = vi8xF9BD;
+ vi9x7531 = vi9xF9BD;
+ vi10x7531 = vi10xF9BD;
+ vi11x7531 = vi11xF9BD;
+ vi12x7531 = vi12xF9BD;
+
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x7BDF, vk00));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x7BDF, vk00));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi6x7BDF, vk00));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x7BDF, vk00));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi10x7BDF, vk00));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x7BDF, vk10));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x7BDF, vk10));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi5x7BDF, vk10));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi7x7BDF, vk10));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi9x7BDF, vk10));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi11x7BDF, vk10));
+ vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x7BDF, vk20));
+ vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
+ vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi6x7BDF, vk20));
+ vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi8x7BDF, vk20));
+ vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi10x7BDF, vk20));
+ vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi12x7BDF, vk20));
+
+
+ __m128 vo0 = _mm_max_ps(vo0p0, vmin);
+ __m128 vo1 = _mm_max_ps(vo1p0, vmin);
+ __m128 vo2 = _mm_max_ps(vo2p0, vmin);
+ __m128 vo3 = _mm_max_ps(vo3p0, vmin);
+ __m128 vo4 = _mm_max_ps(vo4p0, vmin);
+ __m128 vo5 = _mm_max_ps(vo5p0, vmin);
+
+ vo0 = _mm_min_ps(vo0, vmax);
+ vo1 = _mm_min_ps(vo1, vmax);
+ vo2 = _mm_min_ps(vo2, vmax);
+ vo3 = _mm_min_ps(vo3, vmax);
+ vo4 = _mm_min_ps(vo4, vmax);
+ vo5 = _mm_min_ps(vo5, vmax);
+
+ if (w == 7 * sizeof(float)) {
+ _mm_storeu_ps(o5, vo5);
+ o5 += 4;
+ _mm_storeu_ps(o4, vo4);
+ o4 += 4;
+ _mm_storeu_ps(o3, vo3);
+ o3 += 4;
+ _mm_storeu_ps(o2, vo2);
+ o2 += 4;
+ _mm_storeu_ps(o1, vo1);
+ o1 += 4;
+ _mm_storeu_ps(o0, vo0);
+ o0 += 4;
+ } else {
+ w += 1 * sizeof(float);
+ if (w & (4 * sizeof(float))) {
+ _mm_storel_pi((__m64*) o5, vo5);
+ o5 += 2;
+ _mm_storel_pi((__m64*) o4, vo4);
+ o4 += 2;
+ _mm_storel_pi((__m64*) o3, vo3);
+ o3 += 2;
+ _mm_storel_pi((__m64*) o2, vo2);
+ o2 += 2;
+ _mm_storel_pi((__m64*) o1, vo1);
+ o1 += 2;
+ _mm_storel_pi((__m64*) o0, vo0);
+ o0 += 2;
+
+ vo0 = _mm_movehl_ps(vo0, vo0);
+ vo1 = _mm_movehl_ps(vo1, vo1);
+ vo2 = _mm_movehl_ps(vo2, vo2);
+ vo3 = _mm_movehl_ps(vo3, vo3);
+ vo4 = _mm_movehl_ps(vo4, vo4);
+ vo5 = _mm_movehl_ps(vo5, vo5);
+ }
+ if (w & (2 * sizeof(float))) {
+ _mm_store_ss(o5, vo5);
+ o5 += 1;
+ _mm_store_ss(o4, vo4);
+ o4 += 1;
+ _mm_store_ss(o3, vo3);
+ o3 += 1;
+ _mm_store_ss(o2, vo2);
+ o2 += 1;
+ _mm_store_ss(o1, vo1);
+ o1 += 1;
+ _mm_store_ss(o0, vo0);
+ o0 += 1;
+ }
+ }
+ }
+
+ i0 = (const float*) ((uintptr_t) i12 - input_decrement);
+ i1 = (const float*) ((uintptr_t) i0 + input_width);
+ i2 = (const float*) ((uintptr_t) i1 + input_width);
+ i3 = (const float*) ((uintptr_t) i2 + input_width);
+ i4 = (const float*) ((uintptr_t) i3 + input_width);
+ i5 = (const float*) ((uintptr_t) i4 + input_width);
+ i6 = (const float*) ((uintptr_t) i5 + input_width);
+ i7 = (const float*) ((uintptr_t) i6 + input_width);
+ i8 = (const float*) ((uintptr_t) i7 + input_width);
+ i9 = (const float*) ((uintptr_t) i8 + input_width);
+ i10 = (const float*) ((uintptr_t) i9 + input_width);
+ i11 = (const float*) ((uintptr_t) i10 + input_width);
+ i12 = (const float*) ((uintptr_t) i11 + input_width);
+
+ o0 = o5;
+ o1 = (float*) ((uintptr_t) o0 + output_width);
+ o2 = (float*) ((uintptr_t) o1 + output_width);
+ o3 = (float*) ((uintptr_t) o2 + output_width);
+ o4 = (float*) ((uintptr_t) o3 + output_width);
+ o5 = (float*) ((uintptr_t) o4 + output_width);
+
+ output_height = doz(output_height, 6);
+ padded_input_height = doz(padded_input_height, 12);
+ } while (output_height != 0);
+}
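Both generated kernels above share the same tail logic after the main 8-pixel loop: a remainder of exactly 7 input pixels still fills a whole output vector (hence the `w == 7 * sizeof(float)` full store), while shorter remainders bump w by one float and peel stores off its bit pattern. A small helper mirroring that branch structure, to make the mapping explicit (a sketch for exposition, not library code):

#include <assert.h>
#include <stddef.h>

// Number of output pixels the tail path stores for a remainder of w bytes
// of input (w < 8 * sizeof(float)), mirroring the kernels above.
static size_t tail_output_pixels(size_t w) {
  assert(w < 8 * sizeof(float));
  if (w == 7 * sizeof(float)) {
    return 4;  // full _mm_storeu_ps
  }
  w += 1 * sizeof(float);
  size_t n = 0;
  if (w & (4 * sizeof(float))) {
    n += 2;  // _mm_storel_pi writes a pair, then the high half shifts down
  }
  if (w & (2 * sizeof(float))) {
    n += 1;  // _mm_store_ss writes one more pixel
  }
  return n;  // always ceil(remaining_pixels / 2)
}

For example, 6 remaining input pixels (w = 24) yield 2 + 1 = 3 outputs, matching ceil(6 / 2).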
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index 0b24bbf..270f009 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -389,7 +389,14 @@
DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc3)
DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__psimd_1x4_acc3)
DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2)
DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2)
DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_3x4)
DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__psimd_3x4)
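Each DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION line expands to an external prototype with the shared CHW micro-kernel signature, i.e. the parameter list of the definitions earlier in this patch (taking the 2x4 variant as the example; linkage attributes added by the macro are omitted, and the comments are interpretation rather than part of the header):

void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4(
    size_t input_height,
    size_t input_width,    // in bytes; a non-zero multiple of sizeof(float)
    const float* input,
    const float* weights,  // bias followed by the 9 filter taps, row-major
    const float* zero,     // zero-filled row substituted for padding rows
    float* output,
    uint32_t padding_top,  // 0 or 1 for these 3x3s2p1 kernels
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]);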
diff --git a/test/f32-dwconv2d-chw.cc b/test/f32-dwconv2d-chw.cc
index a225b72..634ce91 100644
--- a/test/f32-dwconv2d-chw.cc
+++ b/test/f32-dwconv2d-chw.cc
@@ -3724,6 +3724,1182 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+        .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, output_height_eq_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 3; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, output_height_gt_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 3; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+ }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 8; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
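The input_width ranges in these tests follow from the size relation the kernels implement: with a 3-tap kernel, subsampling 2, and one pixel of padding on each side, output_width = (input_width + 1) / 2 in integer arithmetic. A one-line helper stating the relation (hypothetical name, for exposition only):

static inline size_t s2p1_output_dim(size_t input_dim) {
  // (input + padding - kernel) / subsampling + 1, with padding = 2, kernel = 3
  return (input_dim + 1) / 2;
}

So input widths 7 and 8 both produce exactly 4 outputs (output_width_eq_4), widths 1..6 produce 1..3 (output_width_lt_4), and widths 9..16 produce 5..8, covering the main loop plus every tail length. The same relation applied to heights, with padding_top + padding_bottom = 2, explains the input_height ranges in the output_height cases below.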
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+        .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_height_eq_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 3; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_height_div_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 8; input_height < 32; input_height += 4) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_height_lt_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 3; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, output_height_gt_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 5; input_height < 9; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+ }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 14; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
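Every row-tiled variant under test computes the same function, so a compact scalar reference is a useful mental model for what DWConv2DMicrokernelTester checks each kernel against. A sketch of such a reference, assuming the weights layout used by the kernels above (bias first, then nine row-major taps); this is illustrative, not the tester's actual code:

#include <stddef.h>

static void dwconv2d_chw_3x3s2p1_ref(
    size_t input_height, size_t input_width,  // in pixels
    const float* input, const float* weights, float* output,
    size_t padding_top,  // 0 or 1; bottom/left/right padding fixed at 1
    float output_min, float output_max)
{
  // Same row count as the kernels: (padded_input_height - 3 + 2) / 2,
  // with padded_input_height = input_height + padding_top + 1.
  const size_t output_height = (input_height + padding_top) / 2;
  const size_t output_width = (input_width + 1) / 2;
  for (size_t oy = 0; oy < output_height; oy++) {
    for (size_t ox = 0; ox < output_width; ox++) {
      float acc = weights[0];  // bias
      for (size_t ky = 0; ky < 3; ky++) {
        for (size_t kx = 0; kx < 3; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) (oy * 2 + ky) - (ptrdiff_t) padding_top;
          const ptrdiff_t ix = (ptrdiff_t) (ox * 2 + kx) - 1;
          // Taps that fall into the padding read implicit zeros.
          if (iy >= 0 && iy < (ptrdiff_t) input_height &&
              ix >= 0 && ix < (ptrdiff_t) input_width) {
            acc += input[(size_t) iy * input_width + (size_t) ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      acc = acc < output_min ? output_min : acc;  // clamp to [min, max]
      acc = acc > output_max ? output_max : acc;
      output[oy * output_width + ox] = acc;
    }
  }
}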
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(6)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(6)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+        .input_width(input_width)
+ .input_height(6)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(6)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_height_eq_3) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 5; input_height < 7; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_height_div_3) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 12; input_height < 48; input_height += 6) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_height_lt_3) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, output_height_gt_3) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 7; input_height < 13; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+ }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_3X4, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 20; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(8)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(8)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+        .input_width(input_width)
+ .input_height(8)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(8)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_height_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 7; input_height < 9; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_height_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 16; input_height < 64; input_height += 8) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_height_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 7; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, output_height_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 9; input_height < 17; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+ }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_4X4, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 26; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+        .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, output_height_eq_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 3; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, output_height_gt_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 3; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+ }
+
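+ // This case exercises zero top padding; assuming, as with the other CHW
+ // dwconv ukernels, the effective top padding is passed to the kernel at run
+ // time, the first output row here reads three real input rows instead of
+ // starting from an implicit zero row.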
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC2, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 8; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
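+ // The _acc3 suffix denotes a variant that spreads the nine multiply-adds over
+ // three partial accumulators before a final reduction, presumably to shorten
+ // the addps dependency chain; its test coverage is otherwise identical to the
+ // plain 1x4 variant above.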
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_height_eq_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 3; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_height_gt_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 3; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 8; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(2)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, output_height_eq_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 3; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, output_height_gt_1) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 3; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC4, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 8; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
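+ // The 2x4 tile emits two output rows per pass, so the width tests below pin
+ // input_height to 4 (exactly two stride-2 output rows, assuming output_height =
+ // (input_height + padding_top + padding_bottom - kernel_height) / subsampling + 1)
+ // and the height tests bracket input heights around multiples of two output rows.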
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_width_eq_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 7; input_width < 9; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_width_div_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 16; input_width < 64; input_width += 8) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_width_lt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 1; input_width < 7; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_width_gt_4) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_width = 9; input_width < 17; input_width++) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(4)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_height_eq_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 3; input_height < 5; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_height_div_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 8; input_height < 32; input_height += 4) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_height_lt_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 1; input_height < 3; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, output_height_gt_2) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 5; input_height < 9; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(1)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+ }
+
+ TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_2X4_ACC2, padding_top_eq_0) {
+ TEST_REQUIRES_X86_SSE;
+ for (size_t input_height = 2; input_height < 14; input_height++) {
+ for (size_t input_width = 1; input_width < 41; input_width += 7) {
+ DWConv2DMicrokernelTester()
+ .input_width(input_width)
+ .input_height(input_height)
+ .kernel_height(3)
+ .kernel_width(3)
+ .subsampling(2)
+ .padding_left(1)
+ .padding_right(1)
+ .padding_top(0)
+ .padding_bottom(1)
+ .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_DWCONV2D_CHW_3X3P1__SSSE3_1X4, output_width_eq_4) {
TEST_REQUIRES_X86_SSSE3;
DWConv2DMicrokernelTester()
@@ -4831,134 +6007,6 @@
#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_eq_4) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_width = 7; input_width < 9; input_width++) {
- DWConv2DMicrokernelTester()
- .input_width(input_width)
- .input_height(2)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(1)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
-
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_div_4) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_width = 16; input_width < 64; input_width += 8) {
- DWConv2DMicrokernelTester()
- .input_width(input_width)
- .input_height(2)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(1)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
-
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_lt_4) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_width = 1; input_width < 7; input_width++) {
- DWConv2DMicrokernelTester()
- .input_width(8)
- .input_height(2)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(1)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
-
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_width_gt_4) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_width = 9; input_width < 17; input_width++) {
- DWConv2DMicrokernelTester()
- .input_width(input_width)
- .input_height(2)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(1)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
-
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_height_eq_1) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_height = 1; input_height < 3; input_height++) {
- for (size_t input_width = 1; input_width < 41; input_width += 7) {
- DWConv2DMicrokernelTester()
- .input_width(input_width)
- .input_height(input_height)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(1)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
- }
-
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, output_height_gt_1) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_height = 3; input_height < 5; input_height++) {
- for (size_t input_width = 1; input_width < 41; input_width += 7) {
- DWConv2DMicrokernelTester()
- .input_width(input_width)
- .input_height(input_height)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(1)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
- }
-
- TEST(F32_DWCONV2D_CHW_3X3S2P1__SSE_1X4_ACC3, padding_top_eq_1) {
- TEST_REQUIRES_X86_SSE;
- for (size_t input_height = 2; input_height < 8; input_height++) {
- for (size_t input_width = 1; input_width < 41; input_width += 7) {
- DWConv2DMicrokernelTester()
- .input_width(input_width)
- .input_height(input_height)
- .kernel_height(3)
- .kernel_width(3)
- .subsampling(2)
- .padding_left(1)
- .padding_right(1)
- .padding_top(0)
- .padding_bottom(1)
- .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3);
- }
- }
- }
-#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
-
-
#if !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
TEST(F32_DWCONV2D_CHW_3X3P1__PSIMD_1X4_ACC3, output_width_eq_4) {
TEST_REQUIRES_PSIMD;
diff --git a/test/f32-dwconv2d-chw.yaml b/test/f32-dwconv2d-chw.yaml
index b4563a9..13a7bea 100644
--- a/test/f32-dwconv2d-chw.yaml
+++ b/test/f32-dwconv2d-chw.yaml
@@ -61,6 +61,14 @@
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4
@@ -71,7 +79,6 @@
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc3
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc4
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3
- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__psimd_1x4_acc3
- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__psimd_1x4_acc3
- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__psimd_3x4