Add SQRDIFF (Squared Difference) microkernels

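Adds vector binary microkernels computing the elementwise squared
difference sqrdiff(x, y) = (x - y)^2 in three flavors: VSQRDIFF (two
vector operands), VSQRDIFFC (second operand is a broadcast constant),
and VRSQRDIFFC (first operand is the constant). f32 kernels cover the
scalar, WAsm, PSIMD, NEON, SSE, AVX, and AVX512F backends; f16 kernels
cover neonfp16arith. All variants are generated with ACTIVATION=LINEAR,
i.e. without min/max clamping.

As a rough sketch of what the generated scalar kernels compute (the
function name and the bytes-based `n` convention below are assumptions
for illustration, not the generated source):

    #include <assert.h>
    #include <stddef.h>

    /* Elementwise squared difference: y[i] = (a[i] - b[i])^2.      */
    /* BATCH_TILE=1 style loop; n counts bytes, one float per step. */
    static void vsqrdiff_scalar_x1_sketch(
        size_t n, const float* a, const float* b, float* y)
    {
      assert(n % sizeof(float) == 0);
      for (; n >= sizeof(float); n -= sizeof(float)) {
        const float vdiff = *a++ - *b++;
        *y++ = vdiff * vdiff;  /* ACTIVATION=LINEAR: no clamping */
      }
    }

Note that since (x - y)^2 == (y - x)^2, VRSQRDIFFC produces the same
results as VSQRDIFFC; the reversed variant mirrors the existing
RDIV/RSUB pairing in the vopc templates.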
PiperOrigin-RevId: 314881351
diff --git a/BUILD.bazel b/BUILD.bazel
index 5c95329..bbea588 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -246,9 +246,18 @@
     "src/f32-vbinary/gen/vrdivc-minmax-scalar-x1.c",
     "src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c",
     "src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c",
     "src/f32-vbinary/gen/vrsubc-minmax-scalar-x1.c",
     "src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c",
     "src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c",
+    "src/f32-vbinary/gen/vsqrdiff-scalar-x1.c",
+    "src/f32-vbinary/gen/vsqrdiff-scalar-x2.c",
+    "src/f32-vbinary/gen/vsqrdiff-scalar-x4.c",
+    "src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c",
+    "src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c",
+    "src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c",
     "src/f32-vbinary/gen/vsub-minmax-scalar-x1.c",
     "src/f32-vbinary/gen/vsub-minmax-scalar-x2.c",
     "src/f32-vbinary/gen/vsub-minmax-scalar-x4.c",
@@ -409,9 +418,18 @@
     "src/f32-vbinary/gen/vrdivc-minmax-wasm-x1.c",
     "src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c",
     "src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c",
     "src/f32-vbinary/gen/vrsubc-minmax-wasm-x1.c",
     "src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c",
     "src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c",
+    "src/f32-vbinary/gen/vsqrdiff-wasm-x1.c",
+    "src/f32-vbinary/gen/vsqrdiff-wasm-x2.c",
+    "src/f32-vbinary/gen/vsqrdiff-wasm-x4.c",
+    "src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c",
+    "src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c",
+    "src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c",
     "src/f32-vbinary/gen/vsub-minmax-wasm-x1.c",
     "src/f32-vbinary/gen/vsub-minmax-wasm-x2.c",
     "src/f32-vbinary/gen/vsub-minmax-wasm-x4.c",
@@ -507,8 +525,14 @@
     "src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c",
     "src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c",
     "src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c",
     "src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c",
     "src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c",
+    "src/f32-vbinary/gen/vsqrdiff-psimd-x4.c",
+    "src/f32-vbinary/gen/vsqrdiff-psimd-x8.c",
+    "src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c",
+    "src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c",
     "src/f32-vbinary/gen/vsub-minmax-psimd-x4.c",
     "src/f32-vbinary/gen/vsub-minmax-psimd-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c",
@@ -700,8 +724,14 @@
     "src/f32-vbinary/gen/vmul-minmax-neon-x8.c",
     "src/f32-vbinary/gen/vmulc-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vmulc-minmax-neon-x8.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c",
     "src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c",
+    "src/f32-vbinary/gen/vsqrdiff-neon-x4.c",
+    "src/f32-vbinary/gen/vsqrdiff-neon-x8.c",
+    "src/f32-vbinary/gen/vsqrdiffc-neon-x4.c",
+    "src/f32-vbinary/gen/vsqrdiffc-neon-x8.c",
     "src/f32-vbinary/gen/vsub-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vsub-minmax-neon-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-neon-x4.c",
@@ -1155,8 +1185,14 @@
     "src/f32-vbinary/gen/vmulc-minmax-sse-x8.c",
     "src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c",
     "src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c",
     "src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c",
     "src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c",
+    "src/f32-vbinary/gen/vsqrdiff-sse-x4.c",
+    "src/f32-vbinary/gen/vsqrdiff-sse-x8.c",
+    "src/f32-vbinary/gen/vsqrdiffc-sse-x4.c",
+    "src/f32-vbinary/gen/vsqrdiffc-sse-x8.c",
     "src/f32-vbinary/gen/vsub-minmax-sse-x4.c",
     "src/f32-vbinary/gen/vsub-minmax-sse-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-sse-x4.c",
@@ -1323,8 +1359,14 @@
     "src/f32-vbinary/gen/vmulc-minmax-avx-x16.c",
     "src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c",
     "src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c",
     "src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c",
     "src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c",
+    "src/f32-vbinary/gen/vsqrdiff-avx-x8.c",
+    "src/f32-vbinary/gen/vsqrdiff-avx-x16.c",
+    "src/f32-vbinary/gen/vsqrdiffc-avx-x8.c",
+    "src/f32-vbinary/gen/vsqrdiffc-avx-x16.c",
     "src/f32-vbinary/gen/vsub-minmax-avx-x8.c",
     "src/f32-vbinary/gen/vsub-minmax-avx-x16.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx-x8.c",
@@ -1591,8 +1633,14 @@
     "src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c",
     "src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c",
     "src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c",
+    "src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c",
     "src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c",
     "src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c",
+    "src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c",
+    "src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c",
+    "src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c",
+    "src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c",
     "src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c",
     "src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c",
@@ -3950,6 +3998,33 @@
 )
 
 xnnpack_unit_test(
+    name = "f32_vsqrdiff_test",
+    srcs = [
+        "test/f32-vsqrdiff.cc",
+        "test/vbinary-microkernel-tester.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
+    name = "f32_vsqrdiffc_test",
+    srcs = [
+        "test/f32-vsqrdiffc.cc",
+        "test/vbinaryc-microkernel-tester.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
+    name = "f32_vrsqrdiffc_test",
+    srcs = [
+        "test/f32-vrsqrdiffc.cc",
+        "test/vbinaryc-microkernel-tester.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
     name = "f32_vsub_minmax_test",
     srcs = [
         "test/f32-vsub-minmax.cc",
diff --git a/scripts/generate-f16-vbinary.sh b/scripts/generate-f16-vbinary.sh
index f6f6db3..e55dcca 100755
--- a/scripts/generate-f16-vbinary.sh
+++ b/scripts/generate-f16-vbinary.sh
@@ -5,35 +5,41 @@
 # LICENSE file in the root directory of this source tree.
 
 ################################### ARM NEON ##################################
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=ADD -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=ADD -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=DIV -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=DIV -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MAX -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MAX -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MIN -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmin-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MIN -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmin-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MUL -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MUL -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=SUB -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=SUB -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=ADD     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=ADD     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=DIV     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=DIV     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MAX     -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MAX     -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MIN     -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmin-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MIN     -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmin-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MUL     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=MUL     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=SQRDIFF -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=SQRDIFF -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=SUB     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vop-neonfp16arith.c.in -D OP=SUB     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x16.c
 
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=ADD  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=ADD  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=DIV  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=DIV  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RDIV -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RDIV -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MAX  -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmaxc-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MAX  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmaxc-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MIN  -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vminc-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MIN  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vminc-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MUL  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MUL  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=SUB  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=SUB  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x16.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RSUB -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x8.c
-tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RSUB -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=ADD      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=ADD      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=DIV      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=DIV      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RDIV     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RDIV     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MAX      -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmaxc-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MAX      -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vmaxc-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MIN      -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vminc-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MIN      -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vminc-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MUL      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=MUL      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=SQRDIFF  -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=SQRDIFF  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RSQRDIFF -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RSQRDIFF -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=SUB      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=SUB      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x16.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RSUB     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x8.c
+tools/xngen src/f16-vbinary/vopc-neonfp16arith.c.in -D OP=RSUB     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x16.c
 
 ################################## Unit tests #################################
 tools/generate-vbinary-test.py --spec test/f16-vadd-minmax.yaml --output test/f16-vadd-minmax.cc
@@ -41,6 +47,7 @@
 tools/generate-vbinary-test.py --spec test/f16-vmax.yaml --output test/f16-vmax.cc
 tools/generate-vbinary-test.py --spec test/f16-vmin.yaml --output test/f16-vmin.cc
 tools/generate-vbinary-test.py --spec test/f16-vmul-minmax.yaml --output test/f16-vmul-minmax.cc
+tools/generate-vbinary-test.py --spec test/f16-vsqrdiff.yaml --output test/f16-vsqrdiff.cc
 tools/generate-vbinary-test.py --spec test/f16-vsub-minmax.yaml --output test/f16-vsub-minmax.cc
 tools/generate-vbinary-test.py --spec test/f16-vaddc-minmax.yaml --output test/f16-vaddc-minmax.cc
 tools/generate-vbinary-test.py --spec test/f16-vdivc-minmax.yaml --output test/f16-vdivc-minmax.cc
@@ -48,5 +55,7 @@
 tools/generate-vbinary-test.py --spec test/f16-vmaxc.yaml --output test/f16-vmaxc.cc
 tools/generate-vbinary-test.py --spec test/f16-vminc.yaml --output test/f16-vminc.cc
 tools/generate-vbinary-test.py --spec test/f16-vmulc-minmax.yaml --output test/f16-vmulc-minmax.cc
+tools/generate-vbinary-test.py --spec test/f16-vsqrdiffc.yaml --output test/f16-vsqrdiffc.cc
+tools/generate-vbinary-test.py --spec test/f16-vrsqrdiffc.yaml --output test/f16-vrsqrdiffc.cc
 tools/generate-vbinary-test.py --spec test/f16-vsubc-minmax.yaml --output test/f16-vsubc-minmax.cc
 tools/generate-vbinary-test.py --spec test/f16-vrsubc-minmax.yaml --output test/f16-vrsubc-minmax.cc
diff --git a/scripts/generate-f32-vbinary.sh b/scripts/generate-f32-vbinary.sh
index 66ef0ac..6a3c43f 100755
--- a/scripts/generate-f32-vbinary.sh
+++ b/scripts/generate-f32-vbinary.sh
@@ -6,249 +6,297 @@
 
 #################################### Scalar ###################################
 ### Generic C micro-kernels
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-scalar-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-scalar-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-scalar-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SQRDIFF -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SQRDIFF -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SQRDIFF -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-scalar-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
 
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-scalar-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD      -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD      -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD      -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV      -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV      -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV      -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX      -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX      -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX      -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN      -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN      -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN      -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL      -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL      -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL      -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SQRDIFF  -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SQRDIFF  -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SQRDIFF  -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSQRDIFF -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSQRDIFF -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSQRDIFF -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB      -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB      -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB      -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB     -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-scalar-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB     -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB     -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
 
 ### WAsm-specific micro-kernels
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-wasm-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-wasm-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-wasm-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=ADD     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=DIV     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MAX     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MIN     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=MUL     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SQRDIFF -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SQRDIFF -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SQRDIFF -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-wasm-x4.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vop-scalar.c.in -D OP=SUB     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
 
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-wasm-x1.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
-tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD      -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD      -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=ADD      -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV      -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV      -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=DIV      -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RDIV     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX      -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX      -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MAX      -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN      -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN      -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MIN      -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL      -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL      -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=MUL      -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SQRDIFF  -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SQRDIFF  -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SQRDIFF  -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSQRDIFF -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSQRDIFF -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSQRDIFF -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB      -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB      -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=SUB      -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB     -D BATCH_TILE=1 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-wasm-x1.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB     -D BATCH_TILE=2 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
+tools/xngen src/f32-vbinary/vopc-scalar.c.in -D OP=RSUB     -D BATCH_TILE=4 -D WASM=1 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
 
 ################################### ARM NEON ##################################
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=ADD -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=ADD -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=DIV -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=DIV -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MAX -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-neon-x4.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MAX -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-neon-x8.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MIN -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-neon-x4.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MIN -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-neon-x8.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MUL -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MUL -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=SUB -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=SUB -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=ADD     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=ADD     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=DIV     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=DIV     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MAX     -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MAX     -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MIN     -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MIN     -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MUL     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=MUL     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=SQRDIFF -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=SQRDIFF -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-neon-x8.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=SUB     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vop-neon.c.in -D OP=SUB     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-neon-x8.c
 
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=ADD  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=ADD  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=DIV  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=DIV  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RDIV -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RDIV -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MAX  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MAX  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MIN  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MIN  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MUL  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MUL  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=SUB  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=SUB  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RSUB -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
-tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RSUB -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=ADD      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=ADD      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=DIV      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=DIV      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RDIV     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RDIV     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MAX      -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MAX      -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MIN      -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MIN      -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MUL      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=MUL      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=SQRDIFF  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=SQRDIFF  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RSQRDIFF -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RSQRDIFF -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=SUB      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=SUB      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RSUB     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
+tools/xngen src/f32-vbinary/vopc-neon.c.in -D OP=RSUB     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
 
 #################################### PSIMD ####################################
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=ADD -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=ADD -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=DIV -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=DIV -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MAX -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MAX -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MIN -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-psimd-x4.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MIN -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-psimd-x8.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MUL -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MUL -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=SUB -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=SUB -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=ADD     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=ADD     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=DIV     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=DIV     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MAX     -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MAX     -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MIN     -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MIN     -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MUL     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=MUL     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=SQRDIFF -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=SQRDIFF -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-psimd-x8.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=SUB     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vop-psimd.c.in -D OP=SUB     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
 
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=ADD  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=ADD  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=DIV  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=DIV  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RDIV -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RDIV -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MAX  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MAX  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MIN  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MIN  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MUL  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MUL  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=SUB  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=SUB  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RSUB -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
-tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RSUB -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=ADD      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=ADD      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=DIV      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=DIV      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RDIV     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RDIV     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MAX      -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MAX      -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MIN      -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MIN      -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MUL      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=MUL      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=SQRDIFF  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=SQRDIFF  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RSQRDIFF -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RSQRDIFF -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=SUB      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=SUB      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RSUB     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
+tools/xngen src/f32-vbinary/vopc-psimd.c.in -D OP=RSUB     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
 
 ################################# x86 128-bit #################################
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=ADD -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=ADD -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=DIV -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=DIV -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MAX -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-sse-x4.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MAX -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-sse-x8.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MIN -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-sse-x4.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MIN -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-sse-x8.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MUL -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MUL -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=SUB -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=SUB -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=ADD     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=ADD     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=DIV     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=DIV     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MAX     -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MAX     -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MIN     -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MIN     -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MUL     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=MUL     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=SQRDIFF -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=SQRDIFF -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-sse-x8.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=SUB     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vop-sse.c.in -D OP=SUB     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-sse-x8.c
 
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=ADD  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=ADD  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=DIV  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=DIV  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RDIV -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RDIV -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MAX  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MAX  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MIN  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MIN  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MUL  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MUL  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=SUB  -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=SUB  -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RSUB -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
-tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RSUB -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=ADD      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=ADD      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=DIV      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=DIV      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RDIV     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RDIV     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MAX      -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MAX      -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MIN      -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MIN      -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MUL      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=MUL      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=SQRDIFF  -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=SQRDIFF  -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RSQRDIFF -D BATCH_TILE=4 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RSQRDIFF -D BATCH_TILE=8 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=SUB      -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=SUB      -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RSUB     -D BATCH_TILE=4 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
+tools/xngen src/f32-vbinary/vopc-sse.c.in -D OP=RSUB     -D BATCH_TILE=8 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
 
 ################################# x86 256-bit #################################
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=ADD -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=ADD -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=DIV -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=DIV -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MAX -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx-x8.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MAX -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx-x16.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MIN -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx-x8.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MIN -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx-x16.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MUL -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MUL -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=SUB -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=SUB -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=ADD     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=ADD     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=DIV     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=DIV     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MAX     -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MAX     -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MIN     -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MIN     -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MUL     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=MUL     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=SQRDIFF -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=SQRDIFF -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-avx-x16.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=SUB     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vop-avx.c.in -D OP=SUB     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx-x16.c
 
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=ADD  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=ADD  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=DIV  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=DIV  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RDIV -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RDIV -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MAX  -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MAX  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MIN  -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MIN  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MUL  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MUL  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=SUB  -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=SUB  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RSUB -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
-tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RSUB -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=ADD      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=ADD      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=DIV      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=DIV      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RDIV     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RDIV     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MAX      -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MAX      -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MIN      -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MIN      -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MUL      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=MUL      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=SQRDIFF  -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=SQRDIFF  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RSQRDIFF -D BATCH_TILE=8  -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RSQRDIFF -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=SUB      -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=SUB      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RSUB     -D BATCH_TILE=8  -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
+tools/xngen src/f32-vbinary/vopc-avx.c.in -D OP=RSUB     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
 
 ################################# x86 512-bit #################################
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=ADD -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=ADD -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=DIV -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=DIV -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MAX -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MAX -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MIN -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx512f-x16.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MIN -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx512f-x32.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MUL -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MUL -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=SUB -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=SUB -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=ADD     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=ADD     -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=DIV     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=DIV     -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MAX     -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MAX     -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MIN     -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MIN     -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmin-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MUL     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=MUL     -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=SQRDIFF -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=SQRDIFF -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=SUB     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vop-avx512f.c.in -D OP=SUB     -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
 
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=ADD  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=ADD  -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=DIV  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=DIV  -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RDIV -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RDIV -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MAX  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MAX  -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MIN  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MIN  -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MUL  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MUL  -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=SUB  -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=SUB  -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RSUB -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
-tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RSUB -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=ADD      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=ADD      -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=DIV      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=DIV      -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RDIV     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RDIV     -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MAX      -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MAX      -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vmaxc-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MIN      -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MIN      -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vminc-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MUL      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=MUL      -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=SQRDIFF  -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=SQRDIFF  -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RSQRDIFF -D BATCH_TILE=16 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RSQRDIFF -D BATCH_TILE=32 -D ACTIVATION=LINEAR -o src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=SUB      -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=SUB      -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RSUB     -D BATCH_TILE=16 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
+tools/xngen src/f32-vbinary/vopc-avx512f.c.in -D OP=RSUB     -D BATCH_TILE=32 -D ACTIVATION=MINMAX -o src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
 
 ################################## Unit tests #################################
 tools/generate-vbinary-test.py --spec test/f32-vadd-minmax.yaml --output test/f32-vadd-minmax.cc
@@ -256,6 +304,7 @@
 tools/generate-vbinary-test.py --spec test/f32-vmax.yaml --output test/f32-vmax.cc
 tools/generate-vbinary-test.py --spec test/f32-vmin.yaml --output test/f32-vmin.cc
 tools/generate-vbinary-test.py --spec test/f32-vmul-minmax.yaml --output test/f32-vmul-minmax.cc
+tools/generate-vbinary-test.py --spec test/f32-vsqrdiff.yaml --output test/f32-vsqrdiff.cc
 tools/generate-vbinary-test.py --spec test/f32-vsub-minmax.yaml --output test/f32-vsub-minmax.cc
 tools/generate-vbinary-test.py --spec test/f32-vaddc-minmax.yaml --output test/f32-vaddc-minmax.cc
 tools/generate-vbinary-test.py --spec test/f32-vdivc-minmax.yaml --output test/f32-vdivc-minmax.cc
@@ -263,5 +312,7 @@
 tools/generate-vbinary-test.py --spec test/f32-vmaxc.yaml --output test/f32-vmaxc.cc
 tools/generate-vbinary-test.py --spec test/f32-vminc.yaml --output test/f32-vminc.cc
 tools/generate-vbinary-test.py --spec test/f32-vmulc-minmax.yaml --output test/f32-vmulc-minmax.cc
+tools/generate-vbinary-test.py --spec test/f32-vsqrdiffc.yaml --output test/f32-vsqrdiffc.cc
+tools/generate-vbinary-test.py --spec test/f32-vrsqrdiffc.yaml --output test/f32-vrsqrdiffc.cc
 tools/generate-vbinary-test.py --spec test/f32-vsubc-minmax.yaml --output test/f32-vsubc-minmax.cc
 tools/generate-vbinary-test.py --spec test/f32-vrsubc-minmax.yaml --output test/f32-vrsubc-minmax.cc
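
For reference, SQRDIFF computes the elementwise squared difference y[i] = (a[i] - b[i])^2; the C-suffixed kernels broadcast a single scalar operand, and the R-prefixed RSQRDIFF variants reverse the operand order, mirroring RSUB and RDIV. Because squaring discards the sign, (a - b)^2 == (b - a)^2, so the vsqrdiffc and vrsqrdiffc kernels are numerically interchangeable; the reversed spec presumably exists so the operator framework can treat a left-hand constant uniformly. Note the kernels are generated with ACTIVATION=LINEAR, i.e. without min/max clamping. A minimal scalar sketch of these semantics (illustrative only, with hypothetical helper names; this is not the xngen template output):

#include <assert.h>
#include <stddef.h>

/* SQRDIFF reference: y[i] = (a[i] - b[i])^2. */
static void sqrdiff_ref(size_t n, const float* a, const float* b, float* y) {
  for (size_t i = 0; i < n; i++) {
    const float d = a[i] - b[i];
    y[i] = d * d;
  }
}

/* RSQRDIFFC reference: y[i] = (c - a[i])^2, with c broadcast from *b. */
static void rsqrdiffc_ref(size_t n, const float* a, const float* b, float* y) {
  for (size_t i = 0; i < n; i++) {
    const float d = *b - a[i];
    y[i] = d * d;
  }
}

int main(void) {
  const float a[2] = { 3.5f, -0.75f };
  const float c = -1.25f;
  const float cv[2] = { c, c };
  float y0[2], y1[2];
  sqrdiff_ref(2, a, cv, y0);
  rsqrdiffc_ref(2, a, &c, y1);
  /* fl(a - b) == -fl(b - a) under round-to-nearest, and squaring equal
     magnitudes rounds identically, so the two variants agree exactly. */
  assert(y0[0] == y1[0] && y0[1] == y1[1]);
  return 0;
}
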
diff --git a/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x16.c
index dd06329..61db283 100644
--- a/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x16.c
@@ -41,6 +41,7 @@
     float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
     float16x8_t vy89ABCDEF = vaddq_f16(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x8.c
index 60a45a5..2740e92 100644
--- a/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vadd-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x16.c
index 6724dd2..4b89679 100644
--- a/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
     float16x8_t vy89ABCDEF = vaddq_f16(va89ABCDEF, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x8.c
index d717db1..c8cc3d3 100644
--- a/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vaddc-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x16.c
index 8796155..59d7b04 100644
--- a/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x16.c
@@ -41,6 +41,7 @@
     float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
     float16x8_t vy89ABCDEF = vdivq_f16(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x8.c
index d262c2c..f2f7bf4 100644
--- a/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vdiv-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x16.c
index 901bcfe..f99e8c6 100644
--- a/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
     float16x8_t vy89ABCDEF = vdivq_f16(va89ABCDEF, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x8.c
index a3536d5..69b3b6e 100644
--- a/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vdivc-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vmax-neonfp16arith-x16.c
index f1ad54a..0c4dd9c 100644
--- a/src/f16-vbinary/gen/vmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy89ABCDEF = vmaxq_f16(va89ABCDEF, vb89ABCDEF);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
     vst1q_f16(y, vy89ABCDEF); y += 8;
   }
diff --git a/src/f16-vbinary/gen/vmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vmax-neonfp16arith-x8.c
index 9b6e7f1..b3dfe1c 100644
--- a/src/f16-vbinary/gen/vmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vmax-neonfp16arith-x8.c
@@ -37,6 +37,7 @@
     float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
   }
   for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
diff --git a/src/f16-vbinary/gen/vmaxc-neonfp16arith-x16.c b/src/f16-vbinary/gen/vmaxc-neonfp16arith-x16.c
index 5a9520d..4cff9ab 100644
--- a/src/f16-vbinary/gen/vmaxc-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vmaxc-neonfp16arith-x16.c
@@ -39,6 +39,7 @@
     float16x8_t vy89ABCDEF = vmaxq_f16(va89ABCDEF, vb);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
     vst1q_f16(y, vy89ABCDEF); y += 8;
   }
diff --git a/src/f16-vbinary/gen/vmaxc-neonfp16arith-x8.c b/src/f16-vbinary/gen/vmaxc-neonfp16arith-x8.c
index 2c51399..86e2644 100644
--- a/src/f16-vbinary/gen/vmaxc-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vmaxc-neonfp16arith-x8.c
@@ -37,6 +37,7 @@
     float16x8_t vy01234567 = vmaxq_f16(va01234567, vb);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
   }
   for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
diff --git a/src/f16-vbinary/gen/vmin-neonfp16arith-x16.c b/src/f16-vbinary/gen/vmin-neonfp16arith-x16.c
index 9f71aea..e167e0f 100644
--- a/src/f16-vbinary/gen/vmin-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vmin-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy89ABCDEF = vminq_f16(va89ABCDEF, vb89ABCDEF);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
     vst1q_f16(y, vy89ABCDEF); y += 8;
   }
diff --git a/src/f16-vbinary/gen/vmin-neonfp16arith-x8.c b/src/f16-vbinary/gen/vmin-neonfp16arith-x8.c
index 4dea07b..fe8625b 100644
--- a/src/f16-vbinary/gen/vmin-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vmin-neonfp16arith-x8.c
@@ -37,6 +37,7 @@
     float16x8_t vy01234567 = vminq_f16(va01234567, vb01234567);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
   }
   for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
diff --git a/src/f16-vbinary/gen/vminc-neonfp16arith-x16.c b/src/f16-vbinary/gen/vminc-neonfp16arith-x16.c
index 7be3462..0d0b1a3 100644
--- a/src/f16-vbinary/gen/vminc-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vminc-neonfp16arith-x16.c
@@ -39,6 +39,7 @@
     float16x8_t vy89ABCDEF = vminq_f16(va89ABCDEF, vb);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
     vst1q_f16(y, vy89ABCDEF); y += 8;
   }
diff --git a/src/f16-vbinary/gen/vminc-neonfp16arith-x8.c b/src/f16-vbinary/gen/vminc-neonfp16arith-x8.c
index d517d3b..00bc21c 100644
--- a/src/f16-vbinary/gen/vminc-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vminc-neonfp16arith-x8.c
@@ -37,6 +37,7 @@
     float16x8_t vy01234567 = vminq_f16(va01234567, vb);
 
 
+
     vst1q_f16(y, vy01234567); y += 8;
   }
   for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
diff --git a/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x16.c
index 80b0c9b..c73a09b 100644
--- a/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x16.c
@@ -41,6 +41,7 @@
     float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
     float16x8_t vy89ABCDEF = vmulq_f16(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x8.c
index ac1329c..6b009ff 100644
--- a/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vmul-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x16.c
index d053af9..ac24e50 100644
--- a/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
     float16x8_t vy89ABCDEF = vmulq_f16(va89ABCDEF, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x8.c
index f513983..74341a9 100644
--- a/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vmulc-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x16.c
index a7d6e05..965fb53 100644
--- a/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
     float16x8_t vy89ABCDEF = vdivq_f16(vb, va89ABCDEF);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x8.c
index 5c69ec6..ced3ab4 100644
--- a/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vrdivc-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x16.c b/src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x16.c
new file mode 100644
index 0000000..c6f0e49
--- /dev/null
+++ b/src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x16.c
@@ -0,0 +1,76 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-vbinary/vopc-neonfp16arith.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16(
+    size_t n,
+    const void* restrict a_ptr,
+    const void* restrict b_ptr,
+    void* restrict y_ptr,
+    const struct xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(__fp16) == 0);
+
+  const __fp16* a = (const __fp16*) a_ptr;
+  const __fp16* b = (const __fp16*) b_ptr;
+  __fp16* y = (__fp16*) y_ptr;
+
+
+  const float16x8_t vb = vld1q_dup_f16(b);
+  for (; n >= 16 * sizeof(__fp16); n -= 16 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+    const float16x8_t va89ABCDEF = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
+    float16x8_t vy89ABCDEF = vsubq_f16(vb, va89ABCDEF);
+
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vy89ABCDEF = vmulq_f16(vy89ABCDEF, vy89ABCDEF);
+
+
+    vst1q_f16(y, vy01234567); y += 8;
+    vst1q_f16(y, vy89ABCDEF); y += 8;
+  }
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float16x8_t va01234567 = vld1q_f16(a);
+
+    float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+    float16x4_t vy0123 = vget_low_f16(vy01234567);
+    if (n & (4 * sizeof(__fp16))) {
+      vst1_f16(y, vy0123); y += 4;
+      vy0123 = vget_high_f16(vy01234567);
+    }
+
+    if (n & (2 * sizeof(__fp16))) {
+      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vy0123), 0); y += 2;
+      vy0123 = vext_f16(vy0123, vy0123, 2);
+    }
+
+    if (n & (1 * sizeof(__fp16))) {
+      vst1_lane_f16(y, vy0123, 0);
+    }
+  }
+}
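
The remainder block in the kernel above peels the final r < 8 halfwords by the binary digits of r: n counts bytes, so n & (4 * sizeof(__fp16)) tests whether at least 4 elements remain, with the 2- and 1-element masks following. A tiny standalone demonstration that the conditional 4/2/1 store pattern writes exactly r elements (illustrative, not part of the kernel):

#include <assert.h>
#include <stddef.h>

int main(void) {
  for (size_t r = 1; r < 8; r++) {  /* remainder, in elements */
    size_t written = 0;
    if (r & 4) written += 4;  /* vst1_f16: one 4-lane store */
    if (r & 2) written += 2;  /* vst1_lane_u32: one 2-lane store */
    if (r & 1) written += 1;  /* vst1_lane_f16: one 1-lane store */
    assert(written == r);     /* r = 4*b2 + 2*b1 + 1*b0 in binary */
  }
  return 0;
}
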
diff --git a/src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x8.c b/src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x8.c
new file mode 100644
index 0000000..b41643f
--- /dev/null
+++ b/src/f16-vbinary/gen/vrsqrdiffc-neonfp16arith-x8.c
@@ -0,0 +1,72 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-vbinary/vopc-neonfp16arith.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8(
+    size_t n,
+    const void* restrict a_ptr,
+    const void* restrict b_ptr,
+    void* restrict y_ptr,
+    const struct xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(__fp16) == 0);
+
+  const __fp16* a = (const __fp16*) a_ptr;
+  const __fp16* b = (const __fp16*) b_ptr;
+  __fp16* y = (__fp16*) y_ptr;
+
+
+  const float16x8_t vb = vld1q_dup_f16(b);
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
+
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float16x8_t va01234567 = vld1q_f16(a);
+
+    float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+    float16x4_t vy0123 = vget_low_f16(vy01234567);
+    if (n & (4 * sizeof(__fp16))) {
+      vst1_f16(y, vy0123); y += 4;
+      vy0123 = vget_high_f16(vy01234567);
+    }
+
+    if (n & (2 * sizeof(__fp16))) {
+      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vy0123), 0); y += 2;
+      vy0123 = vext_f16(vy0123, vy0123, 2);
+    }
+
+    if (n & (1 * sizeof(__fp16))) {
+      vst1_lane_f16(y, vy0123, 0);
+    }
+  }
+}
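
Note that in this x8 variant the main loop and the single-vector loop that follows it share the condition n >= 8 * sizeof(__fp16), so the second loop can never execute: the first loop only exits once fewer than 8 elements remain. The generator appears to emit it for structural uniformity with the x16 variant, and a compiler will typically remove it as dead code.
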
diff --git a/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x16.c
index e3659e8..9c1e3d9 100644
--- a/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
     float16x8_t vy89ABCDEF = vsubq_f16(vb, va89ABCDEF);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
 
diff --git a/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x8.c
index 6aa171e..248d38e 100644
--- a/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vrsubc-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x16.c b/src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x16.c
new file mode 100644
index 0000000..17019b4
--- /dev/null
+++ b/src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x16.c
@@ -0,0 +1,79 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-vbinary/vop-neonfp16arith.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16(
+    size_t n,
+    const void* restrict a_ptr,
+    const void* restrict b_ptr,
+    void* restrict y_ptr,
+    const struct xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(__fp16) == 0);
+
+  const __fp16* a = (const __fp16*) a_ptr;
+  const __fp16* b = (const __fp16*) b_ptr;
+  __fp16* y = (__fp16*) y_ptr;
+
+
+  for (; n >= 16 * sizeof(__fp16); n -= 16 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+    const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
+    const float16x8_t va89ABCDEF = vld1q_f16(a); a += 8;
+    const float16x8_t vb89ABCDEF = vld1q_f16(b); b += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
+    float16x8_t vy89ABCDEF = vsubq_f16(va89ABCDEF, vb89ABCDEF);
+
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vy89ABCDEF = vmulq_f16(vy89ABCDEF, vy89ABCDEF);
+
+
+    vst1q_f16(y, vy01234567); y += 8;
+    vst1q_f16(y, vy89ABCDEF); y += 8;
+  }
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+    const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float16x8_t va01234567 = vld1q_f16(a);
+    const float16x8_t vb01234567 = vld1q_f16(b);
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+    float16x4_t vy0123 = vget_low_f16(vy01234567);
+    if (n & (4 * sizeof(__fp16))) {
+      vst1_f16(y, vy0123); y += 4;
+      vy0123 = vget_high_f16(vy01234567);
+    }
+
+    if (n & (2 * sizeof(__fp16))) {
+      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vy0123), 0); y += 2;
+      vy0123 = vext_f16(vy0123, vy0123, 2);
+    }
+
+    if (n & (1 * sizeof(__fp16))) {
+      vst1_lane_f16(y, vy0123, 0);
+    }
+  }
+}
diff --git a/src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x8.c b/src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x8.c
new file mode 100644
index 0000000..f6481d7
--- /dev/null
+++ b/src/f16-vbinary/gen/vsqrdiff-neonfp16arith-x8.c
@@ -0,0 +1,74 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-vbinary/vop-neonfp16arith.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8(
+    size_t n,
+    const void* restrict a_ptr,
+    const void* restrict b_ptr,
+    void* restrict y_ptr,
+    const struct xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(__fp16) == 0);
+
+  const __fp16* a = (const __fp16*) a_ptr;
+  const __fp16* b = (const __fp16*) b_ptr;
+  __fp16* y = (__fp16*) y_ptr;
+
+
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+    const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
+
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+    const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float16x8_t va01234567 = vld1q_f16(a);
+    const float16x8_t vb01234567 = vld1q_f16(b);
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+    float16x4_t vy0123 = vget_low_f16(vy01234567);
+    if (n & (4 * sizeof(__fp16))) {
+      vst1_f16(y, vy0123); y += 4;
+      vy0123 = vget_high_f16(vy01234567);
+    }
+
+    if (n & (2 * sizeof(__fp16))) {
+      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vy0123), 0); y += 2;
+      vy0123 = vext_f16(vy0123, vy0123, 2);
+    }
+
+    if (n & (1 * sizeof(__fp16))) {
+      vst1_lane_f16(y, vy0123, 0);
+    }
+  }
+}
diff --git a/src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x16.c b/src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x16.c
new file mode 100644
index 0000000..f1132b3
--- /dev/null
+++ b/src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x16.c
@@ -0,0 +1,76 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-vbinary/vopc-neonfp16arith.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16(
+    size_t n,
+    const void* restrict a_ptr,
+    const void* restrict b_ptr,
+    void* restrict y_ptr,
+    const struct xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(__fp16) == 0);
+
+  const __fp16* a = (const __fp16*) a_ptr;
+  const __fp16* b = (const __fp16*) b_ptr;
+  __fp16* y = (__fp16*) y_ptr;
+
+
+  const float16x8_t vb = vld1q_dup_f16(b);
+  for (; n >= 16 * sizeof(__fp16); n -= 16 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+    const float16x8_t va89ABCDEF = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
+    float16x8_t vy89ABCDEF = vsubq_f16(va89ABCDEF, vb);
+
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vy456789AB = vmulq_f16(vy456789AB, vy456789AB);
+
+
+    vst1q_f16(y, vy01234567); y += 8;
+    vst1q_f16(y, vy456789AB); y += 8;
+  }
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float16x8_t va01234567 = vld1q_f16(a);
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+    float16x4_t vy0123 = vget_low_f16(vy01234567);
+    if (n & (4 * sizeof(__fp16))) {
+      vst1_f16(y, vy0123); y += 4;
+      vy0123 = vget_high_f16(vy01234567);
+    }
+
+    if (n & (2 * sizeof(__fp16))) {
+      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vy0123), 0); y += 2;
+      vy0123 = vext_f16(vy0123, vy0123, 2);
+    }
+
+    if (n & (1 * sizeof(__fp16))) {
+      vst1_lane_f16(y, vy0123, 0);
+    }
+  }
+}
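
The vsqrdiffc variant differs only in its second operand: b points at a single value that is broadcast (vld1q_dup_f16 above) against every element of a, i.e. y[i] = (a[i] - b[0])^2. The x16 kernel unrolls two 8-lane vectors per iteration for extra instruction-level parallelism, then falls through to an 8-wide loop for the 8..15-element range. A scalar sketch of the broadcast contract (illustrative only):

    #include <stddef.h>

    // Scalar reference for SQRDIFFC: the second operand is one value.
    static void f16_sqrdiffc_scalar_ref(
        size_t n, const __fp16* a, const __fp16* b, __fp16* y)
    {
      const __fp16 vb = *b;
      for (size_t i = 0; i < n / sizeof(__fp16); i++) {
        const __fp16 vd = a[i] - vb;
        y[i] = vd * vd;
      }
    }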
diff --git a/src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x8.c b/src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x8.c
new file mode 100644
index 0000000..94480ce
--- /dev/null
+++ b/src/f16-vbinary/gen/vsqrdiffc-neonfp16arith-x8.c
@@ -0,0 +1,72 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-vbinary/vopc-neonfp16arith.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8(
+    size_t n,
+    const void* restrict a_ptr,
+    const void* restrict b_ptr,
+    void* restrict y_ptr,
+    const struct xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(__fp16) == 0);
+
+  const __fp16* a = (const __fp16*) a_ptr;
+  const __fp16* b = (const __fp16*) b_ptr;
+  __fp16* y = (__fp16*) y_ptr;
+
+
+  const float16x8_t vb = vld1q_dup_f16(b);
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
+
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
+    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+    vst1q_f16(y, vy01234567); y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float16x8_t va01234567 = vld1q_f16(a);
+
+    float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
+    vy01234567 = vmulq_f16(vy01234567, vy01234567);
+
+    float16x4_t vy0123 = vget_low_f16(vy01234567);
+    if (n & (4 * sizeof(__fp16))) {
+      vst1_f16(y, vy0123); y += 4;
+      vy0123 = vget_high_f16(vy01234567);
+    }
+
+    if (n & (2 * sizeof(__fp16))) {
+      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vy0123), 0); y += 2;
+      vy0123 = vext_f16(vy0123, vy0123, 2);
+    }
+
+    if (n & (1 * sizeof(__fp16))) {
+      vst1_lane_f16(y, vy0123, 0);
+    }
+  }
+}
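
A hypothetical call of the x8 kernel above, exercising both the main loop and the tail path (assumes the xnnpack/vbinary.h prototype is in scope; the field layout of struct xnn_f16_default_params is not shown in this patch, so a zero-initialized instance stands in for real parameter setup):

    #include <string.h>

    void sqrdiffc_example(void) {
      __fp16 a[16], y[16];  // 11 live elements, padded to a full vector so
                            // the tail over-read stays in bounds
      const __fp16 b = (__fp16) 0.5f;
      for (int i = 0; i < 11; i++) a[i] = (__fp16) i;
      struct xnn_f16_default_params params;
      memset(&params, 0, sizeof(params));  // assumed: no fields need setup
      // 11 elements: 8 through the main loop, 2 + 1 through the tail path.
      xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8(
          11 * sizeof(__fp16), a, &b, y, &params);
    }

The four vsub/vsubc hunks that follow change whitespace only: regenerating with the updated template leaves an extra blank line where the squaring step would be emitted for other ops.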
diff --git a/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x16.c
index adc0703..6e9097c 100644
--- a/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x16.c
@@ -41,6 +41,7 @@
     float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
     float16x8_t vy456789AB = vsubq_f16(va456789AB, vb456789AB);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy456789AB = vmaxq_f16(vy456789AB, vy_min);
 
diff --git a/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x8.c
index 8460f1b..37db543 100644
--- a/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vsub-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x16.c b/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x16.c
index c8d1a3e..dd8705b 100644
--- a/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x16.c
+++ b/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x16.c
@@ -40,6 +40,7 @@
     float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
     float16x8_t vy456789AB = vsubq_f16(va456789AB, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
     vy456789AB = vmaxq_f16(vy456789AB, vy_min);
 
diff --git a/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x8.c b/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x8.c
index b3b1e76..d022cf6 100644
--- a/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x8.c
+++ b/src/f16-vbinary/gen/vsubc-minmax-neonfp16arith-x8.c
@@ -38,6 +38,7 @@
 
     float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
 
+
     vy01234567 = vmaxq_f16(vy01234567, vy_min);
 
     vy01234567 = vminq_f16(vy01234567, vy_max);
diff --git a/src/f16-vbinary/vop-neonfp16arith.c.in b/src/f16-vbinary/vop-neonfp16arith.c.in
index 0587185..449d69b 100644
--- a/src/f16-vbinary/vop-neonfp16arith.c.in
+++ b/src/f16-vbinary/vop-neonfp16arith.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "01234567456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "vminq_f16(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "vmulq_f16(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "vsubq_f16(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "vsubq_f16(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f16_default_params", "MINMAX": "xnn_f16_minmax_params"}[ACTIVATION]
@@ -52,6 +53,10 @@
     $for N in range(0, BATCH_TILE, 8):
       float16x8_t vy${ABC[N:N+8]} = ${VOPQ_f16("va" + ABC[N:N+8], "vb" + ABC[N:N+8])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 8):
+        vy${ABC[N:N+8]} = vmulq_f16(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 8):
         vy${ABC[N:N+8]} = vmaxq_f16(vy${ABC[N:N+8]}, vy_min);
@@ -68,6 +73,8 @@
       const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
 
       float16x8_t vy01234567 = ${VOPQ_f16("va01234567", "vb01234567")};
+      $if OP == "SQRDIFF":
+        vy01234567 = vmulq_f16(vy01234567, vy01234567);
       $if ACTIVATION == "MINMAX":
         vy01234567 = vmaxq_f16(vy01234567, vy_min);
         vy01234567 = vminq_f16(vy01234567, vy_max);
@@ -78,6 +85,8 @@
     const float16x8_t vb01234567 = vld1q_f16(b);
 
     float16x8_t vy01234567 = ${VOPQ_f16("va01234567", "vb01234567")};
+    $if OP == "SQRDIFF":
+      vy01234567 = vmulq_f16(vy01234567, vy01234567);
     $if ACTIVATION == "MINMAX":
       vy01234567 = vmaxq_f16(vy01234567, vy_min);
       vy01234567 = vminq_f16(vy01234567, vy_max);
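
The template change above is the core of the patch: SQRDIFF reuses the SUB lambda for ${VOPQ_f16(...)}, and a guarded block then squares the result in place, so no new per-op intrinsic mapping is needed. For one 8-lane vector the expansion comes out as follows (the wrapper function is illustrative; the two statements are exactly what lands in the generated files above):

    #include <arm_neon.h>

    static float16x8_t expand_sqrdiff(float16x8_t va01234567, float16x8_t vb01234567) {
      float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);  // ${VOPQ_f16(...)}
      vy01234567 = vmulq_f16(vy01234567, vy01234567);              // $if OP == "SQRDIFF" block
      return vy01234567;
    }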
diff --git a/src/f16-vbinary/vopc-neonfp16arith.c.in b/src/f16-vbinary/vopc-neonfp16arith.c.in
index cbc11f6..cc2c0b1 100644
--- a/src/f16-vbinary/vopc-neonfp16arith.c.in
+++ b/src/f16-vbinary/vopc-neonfp16arith.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "01234567456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "vmulq_f16(%s, vb)" % x,
 $  "SUB": lambda x: "vsubq_f16(%s, vb)" % x,
 $  "RSUB": lambda x: "vsubq_f16(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "vsubq_f16(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "vsubq_f16(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f16_default_params", "MINMAX": "xnn_f16_minmax_params"}[ACTIVATION]
@@ -54,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 8):
       float16x8_t vy${ABC[N:N+8]} = ${VOPQ_f16("va" + ABC[N:N+8])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 8):
+        vy${ABC[N:N+8]} = vmulq_f16(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 8):
         vy${ABC[N:N+8]} = vmaxq_f16(vy${ABC[N:N+8]}, vy_min);
@@ -69,6 +75,8 @@
       const float16x8_t va01234567 = vld1q_f16(a); a += 8;
 
       float16x8_t vy01234567 = ${VOPQ_f16("va01234567")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy01234567 = vmulq_f16(vy01234567, vy01234567);
       $if ACTIVATION == "MINMAX":
         vy01234567 = vmaxq_f16(vy01234567, vy_min);
         vy01234567 = vminq_f16(vy01234567, vy_max);
@@ -78,6 +86,8 @@
     const float16x8_t va01234567 = vld1q_f16(a);
 
     float16x8_t vy01234567 = ${VOPQ_f16("va01234567")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy01234567 = vmulq_f16(vy01234567, vy01234567);
     $if ACTIVATION == "MINMAX":
       vy01234567 = vmaxq_f16(vy01234567, vy_min);
       vy01234567 = vminq_f16(vy01234567, vy_max);
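
The vopc template gains both SQRDIFF and RSQRDIFF even though the two coincide mathematically, since squaring discards the sign of the difference; carrying both keeps the reversed-constant kernel family (RSUB, RDIV, ...) uniform. A sketch of the identity, in single precision for brevity:

    #include <assert.h>
    #include <math.h>

    // (a - b)^2 == (b - a)^2: IEEE negation is exact and squaring drops the
    // sign, so the results match bit-for-bit; the only non-equal case is NaN
    // (e.g. from inf - inf), which then appears on both sides together.
    static void check_rsqrdiff_identity(float a, float b) {
      const float d0 = (a - b) * (a - b);
      const float d1 = (b - a) * (b - a);
      assert(isnan(d0) ? isnan(d1) : d0 == d1);
    }

The long run of f32 hunks that follows is whitespace-only churn, evidently from regenerating the f32 kernels with their updated templates: one extra blank line lands between the compute step and the clamp/store step, with no functional change.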
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx-x16.c b/src/f32-vbinary/gen/vadd-minmax-avx-x16.c
index b799115..2722f82 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_add_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx-x8.c b/src/f32-vbinary/gen/vadd-minmax-avx-x8.c
index cd3fd03..6292a1e 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
index 27b9960..0524e99 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
index ecb71ab..f33cefb 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_add_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-neon-x4.c b/src/f32-vbinary/gen/vadd-minmax-neon-x4.c
index f0f9ed5..ddca8e2 100644
--- a/src/f32-vbinary/gen/vadd-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vaddq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-neon-x8.c b/src/f32-vbinary/gen/vadd-minmax-neon-x8.c
index b2e074f..9f4ff35 100644
--- a/src/f32-vbinary/gen/vadd-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vaddq_f32(va0123, vb0123);
     float32x4_t vy4567 = vaddq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c b/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
index 95addcf..7a8e7dd 100644
--- a/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c b/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
index 5fef417..6de0617 100644
--- a/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_add_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c b/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
index 63093fd..0bfbd55 100644
--- a/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 + vb0;
     float vy1 = va1 + vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c b/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
index 2e182e0..8fbabcb 100644
--- a/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 + vb2;
     float vy3 = va3 + vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vadd-minmax-sse-x4.c b/src/f32-vbinary/gen/vadd-minmax-sse-x4.c
index a1079d3..0990f78 100644
--- a/src/f32-vbinary/gen/vadd-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_add_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-sse-x8.c b/src/f32-vbinary/gen/vadd-minmax-sse-x8.c
index e661dc4..c6f9ac6 100644
--- a/src/f32-vbinary/gen/vadd-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_add_ps(va0123, vb0123);
     __m128 vy4567 = _mm_add_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c b/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
index 6e0a742..67c3c2a 100644
--- a/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 + vb0;
     float vy1 = va1 + vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c b/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
index 27219bb..ddbafca 100644
--- a/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 + vb2;
     float vy3 = va3 + vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c b/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
index cdbf270..140c3dc 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_add_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c b/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
index 2d95818..f18a121 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
index 7536546..7407b41 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
index 4eb57d8..c0cb054 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_add_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c b/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
index d68aa6f..d1b1d38 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vaddq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c b/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
index b2f16d6..3c3fe34 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vaddq_f32(va0123, vb);
     float32x4_t vy4567 = vaddq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
index 68f2b44..c9e4231 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
index 9dfb664..c2f1f24 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_add_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
index d9ff3cf..e183f22 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 + vb;
     float vy1 = va1 + vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
index 7652791..51cb7e6 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 + vb;
     float vy3 = va3 + vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c b/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
index 57181d4..1eb07d4 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_add_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c b/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
index bfd25cd..a169ca6 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_add_ps(va0123, vb);
     __m128 vy4567 = _mm_add_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
index 78dc878..44c66cc 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 + vb;
     float vy1 = va1 + vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
index 5c958c3..def140a 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 + vb;
     float vy3 = va3 + vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c b/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
index e2e6f6d..134ad9a 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_div_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c b/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
index a4e3283..0f01664 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
index bd602b6..61f290b 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
index 784d575..52ef1da 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_div_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c b/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
index a905e67..bb8c8d3 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vdivq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c b/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
index 687682e..1ad0784 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vdivq_f32(va0123, vb0123);
     float32x4_t vy4567 = vdivq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c b/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
index 48d73bb..6186946 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c b/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
index 184c35f..e5016cc 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_div_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c b/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
index b1b9e4a..824c2bd 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 / vb0;
     float vy1 = va1 / vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c b/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
index 2effa49..3d6a8cd 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 / vb2;
     float vy3 = va3 / vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c b/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
index d9df6d6..2c69c00 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_div_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c b/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
index deea4ce..42e11c6 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_div_ps(va0123, vb0123);
     __m128 vy4567 = _mm_div_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c b/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
index 9f045b1..f7a5cce 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 / vb0;
     float vy1 = va1 / vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c b/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
index e472e86..212b0d6 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 / vb2;
     float vy3 = va3 / vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c b/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
index 6411097..945916c 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_div_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c b/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
index 9a51c51..e612f0f 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
index 9a90ed3..1163351 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
index 4f535fe..1a84a0e 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_div_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c b/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
index d669455..5bb8b5f 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vdivq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c b/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
index c65cf67..1517447 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vdivq_f32(va0123, vb);
     float32x4_t vy4567 = vdivq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
index 4334692..409a945 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
index a73a171..9f05b23 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_div_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
index 520b555..954af51 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 / vb;
     float vy1 = va1 / vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
index 113cdbf..0d05701 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 / vb;
     float vy3 = va3 / vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c b/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
index 6784a57..0ab1ff9 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_div_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c b/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
index 033a817..7d36f03 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_div_ps(va0123, vb);
     __m128 vy4567 = _mm_div_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
index 41363d6..eafeef2 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 / vb;
     float vy1 = va1 / vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
index 6e36564..f46d880 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 / vb;
     float vy3 = va3 / vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmax-avx-x16.c b/src/f32-vbinary/gen/vmax-avx-x16.c
index 438f0bb..df889aa 100644
--- a/src/f32-vbinary/gen/vmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vmax-avx-x16.c
@@ -41,6 +41,7 @@
     __m256 vy89ABCDEF = _mm256_max_ps(va89ABCDEF, vb89ABCDEF);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vmax-avx-x8.c b/src/f32-vbinary/gen/vmax-avx-x8.c
index 92de68f..df7c785 100644
--- a/src/f32-vbinary/gen/vmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vmax-avx-x8.c
@@ -38,6 +38,7 @@
     __m256 vy01234567 = _mm256_max_ps(va01234567, vb01234567);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vmax-avx512f-x16.c b/src/f32-vbinary/gen/vmax-avx512f-x16.c
index c5d324d..505a792 100644
--- a/src/f32-vbinary/gen/vmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmax-avx512f-x16.c
@@ -37,6 +37,7 @@
     __m512 vy0123456789ABCDEF = _mm512_max_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vmax-avx512f-x32.c b/src/f32-vbinary/gen/vmax-avx512f-x32.c
index 469bf65..26768c8 100644
--- a/src/f32-vbinary/gen/vmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmax-avx512f-x32.c
@@ -40,6 +40,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vmax-neon-x4.c b/src/f32-vbinary/gen/vmax-neon-x4.c
index 2dddd9c..ebbc034 100644
--- a/src/f32-vbinary/gen/vmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vmax-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vmaxq_f32(va0123, vb0123);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vmax-neon-x8.c b/src/f32-vbinary/gen/vmax-neon-x8.c
index f8a5ce1..79fc7a2 100644
--- a/src/f32-vbinary/gen/vmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy4567 = vmaxq_f32(va4567, vb4567);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmax-psimd-x4.c b/src/f32-vbinary/gen/vmax-psimd-x4.c
index ae20426..001490d 100644
--- a/src/f32-vbinary/gen/vmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmax-psimd-x4.c
@@ -36,6 +36,7 @@
     psimd_f32 vy0123 = psimd_max_f32(va0123, vb0123);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmax-psimd-x8.c b/src/f32-vbinary/gen/vmax-psimd-x8.c
index af455a9..f6396e0 100644
--- a/src/f32-vbinary/gen/vmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmax-psimd-x8.c
@@ -39,6 +39,7 @@
     psimd_f32 vy4567 = psimd_max_f32(va4567, vb4567);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmax-scalar-x2.c b/src/f32-vbinary/gen/vmax-scalar-x2.c
index 59315bb..3c41548 100644
--- a/src/f32-vbinary/gen/vmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmax-scalar-x2.c
@@ -38,6 +38,7 @@
     float vy1 = math_max_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmax-scalar-x4.c b/src/f32-vbinary/gen/vmax-scalar-x4.c
index 0f3102c..729cbd8 100644
--- a/src/f32-vbinary/gen/vmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmax-scalar-x4.c
@@ -44,6 +44,7 @@
     float vy3 = math_max_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmax-sse-x4.c b/src/f32-vbinary/gen/vmax-sse-x4.c
index 104446c..41be09c 100644
--- a/src/f32-vbinary/gen/vmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vmax-sse-x4.c
@@ -37,6 +37,7 @@
     __m128 vy0123 = _mm_max_ps(va0123, vb0123);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmax-sse-x8.c b/src/f32-vbinary/gen/vmax-sse-x8.c
index 225873f..095f75f 100644
--- a/src/f32-vbinary/gen/vmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vmax-sse-x8.c
@@ -40,6 +40,7 @@
     __m128 vy4567 = _mm_max_ps(va4567, vb4567);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmax-wasm-x2.c b/src/f32-vbinary/gen/vmax-wasm-x2.c
index 70bb55d..813b12e 100644
--- a/src/f32-vbinary/gen/vmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmax-wasm-x2.c
@@ -38,6 +38,7 @@
     float vy1 = __builtin_wasm_max_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmax-wasm-x4.c b/src/f32-vbinary/gen/vmax-wasm-x4.c
index f8d36cc..556703d 100644
--- a/src/f32-vbinary/gen/vmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmax-wasm-x4.c
@@ -44,6 +44,7 @@
     float vy3 = __builtin_wasm_max_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmaxc-avx-x16.c b/src/f32-vbinary/gen/vmaxc-avx-x16.c
index c2d7ce7..ead7d47 100644
--- a/src/f32-vbinary/gen/vmaxc-avx-x16.c
+++ b/src/f32-vbinary/gen/vmaxc-avx-x16.c
@@ -38,6 +38,7 @@
     __m256 vy89ABCDEF = _mm256_max_ps(va89ABCDEF, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vmaxc-avx-x8.c b/src/f32-vbinary/gen/vmaxc-avx-x8.c
index 0c3c83f..91f55d2 100644
--- a/src/f32-vbinary/gen/vmaxc-avx-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-avx-x8.c
@@ -36,6 +36,7 @@
     __m256 vy01234567 = _mm256_max_ps(va01234567, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-avx512f-x16.c b/src/f32-vbinary/gen/vmaxc-avx512f-x16.c
index dee5815..356d90d 100644
--- a/src/f32-vbinary/gen/vmaxc-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmaxc-avx512f-x16.c
@@ -35,6 +35,7 @@
     __m512 vy0123456789ABCDEF = _mm512_max_ps(va0123456789ABCDEF, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-avx512f-x32.c b/src/f32-vbinary/gen/vmaxc-avx512f-x32.c
index 5e4bf2a..aba2ef5 100644
--- a/src/f32-vbinary/gen/vmaxc-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmaxc-avx512f-x32.c
@@ -37,6 +37,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vaGHIJKLMNOPQRSTUV, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vmaxc-neon-x4.c b/src/f32-vbinary/gen/vmaxc-neon-x4.c
index e94c918..a996ab1 100644
--- a/src/f32-vbinary/gen/vmaxc-neon-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vmaxq_f32(va0123, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vmaxc-neon-x8.c b/src/f32-vbinary/gen/vmaxc-neon-x8.c
index 8132011..c246fae 100644
--- a/src/f32-vbinary/gen/vmaxc-neon-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-neon-x8.c
@@ -35,6 +35,7 @@
     float32x4_t vy4567 = vmaxq_f32(va4567, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-psimd-x4.c b/src/f32-vbinary/gen/vmaxc-psimd-x4.c
index 3310f68..bc43c4f 100644
--- a/src/f32-vbinary/gen/vmaxc-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-psimd-x4.c
@@ -34,6 +34,7 @@
     psimd_f32 vy0123 = psimd_max_f32(va0123, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-psimd-x8.c b/src/f32-vbinary/gen/vmaxc-psimd-x8.c
index c74546a..9a6cf1d 100644
--- a/src/f32-vbinary/gen/vmaxc-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-psimd-x8.c
@@ -36,6 +36,7 @@
     psimd_f32 vy4567 = psimd_max_f32(va4567, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmaxc-scalar-x2.c b/src/f32-vbinary/gen/vmaxc-scalar-x2.c
index 1c46421..e9dc506 100644
--- a/src/f32-vbinary/gen/vmaxc-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmaxc-scalar-x2.c
@@ -35,6 +35,7 @@
     float vy1 = math_max_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmaxc-scalar-x4.c b/src/f32-vbinary/gen/vmaxc-scalar-x4.c
index f683c56..5459acf 100644
--- a/src/f32-vbinary/gen/vmaxc-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-scalar-x4.c
@@ -39,6 +39,7 @@
     float vy3 = math_max_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmaxc-sse-x4.c b/src/f32-vbinary/gen/vmaxc-sse-x4.c
index 72061bb..6f80551 100644
--- a/src/f32-vbinary/gen/vmaxc-sse-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-sse-x4.c
@@ -35,6 +35,7 @@
     __m128 vy0123 = _mm_max_ps(va0123, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-sse-x8.c b/src/f32-vbinary/gen/vmaxc-sse-x8.c
index dfb5bc2..98a45ca 100644
--- a/src/f32-vbinary/gen/vmaxc-sse-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-sse-x8.c
@@ -37,6 +37,7 @@
     __m128 vy4567 = _mm_max_ps(va4567, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmaxc-wasm-x2.c b/src/f32-vbinary/gen/vmaxc-wasm-x2.c
index 4d7f50c..d80941e 100644
--- a/src/f32-vbinary/gen/vmaxc-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmaxc-wasm-x2.c
@@ -35,6 +35,7 @@
     float vy1 = __builtin_wasm_max_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmaxc-wasm-x4.c b/src/f32-vbinary/gen/vmaxc-wasm-x4.c
index 43f58fc..0fa1d17 100644
--- a/src/f32-vbinary/gen/vmaxc-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-wasm-x4.c
@@ -39,6 +39,7 @@
     float vy3 = __builtin_wasm_max_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmin-avx-x16.c b/src/f32-vbinary/gen/vmin-avx-x16.c
index 67d8eb9..046b03e 100644
--- a/src/f32-vbinary/gen/vmin-avx-x16.c
+++ b/src/f32-vbinary/gen/vmin-avx-x16.c
@@ -41,6 +41,7 @@
     __m256 vy89ABCDEF = _mm256_min_ps(va89ABCDEF, vb89ABCDEF);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vmin-avx-x8.c b/src/f32-vbinary/gen/vmin-avx-x8.c
index 728d2b7..a37be18 100644
--- a/src/f32-vbinary/gen/vmin-avx-x8.c
+++ b/src/f32-vbinary/gen/vmin-avx-x8.c
@@ -38,6 +38,7 @@
     __m256 vy01234567 = _mm256_min_ps(va01234567, vb01234567);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vmin-avx512f-x16.c b/src/f32-vbinary/gen/vmin-avx512f-x16.c
index 1feb387..b977956 100644
--- a/src/f32-vbinary/gen/vmin-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmin-avx512f-x16.c
@@ -37,6 +37,7 @@
     __m512 vy0123456789ABCDEF = _mm512_min_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vmin-avx512f-x32.c b/src/f32-vbinary/gen/vmin-avx512f-x32.c
index c3c2aca..894374c 100644
--- a/src/f32-vbinary/gen/vmin-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmin-avx512f-x32.c
@@ -40,6 +40,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_min_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vmin-neon-x4.c b/src/f32-vbinary/gen/vmin-neon-x4.c
index 0490b96..f4bfbd6 100644
--- a/src/f32-vbinary/gen/vmin-neon-x4.c
+++ b/src/f32-vbinary/gen/vmin-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vminq_f32(va0123, vb0123);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vmin-neon-x8.c b/src/f32-vbinary/gen/vmin-neon-x8.c
index ebd4855..2c936c3 100644
--- a/src/f32-vbinary/gen/vmin-neon-x8.c
+++ b/src/f32-vbinary/gen/vmin-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy4567 = vminq_f32(va4567, vb4567);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmin-psimd-x4.c b/src/f32-vbinary/gen/vmin-psimd-x4.c
index de2fe90..22cddb1 100644
--- a/src/f32-vbinary/gen/vmin-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmin-psimd-x4.c
@@ -36,6 +36,7 @@
     psimd_f32 vy0123 = psimd_min_f32(va0123, vb0123);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmin-psimd-x8.c b/src/f32-vbinary/gen/vmin-psimd-x8.c
index cc1f12e..da7fc6a 100644
--- a/src/f32-vbinary/gen/vmin-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmin-psimd-x8.c
@@ -39,6 +39,7 @@
     psimd_f32 vy4567 = psimd_min_f32(va4567, vb4567);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmin-scalar-x2.c b/src/f32-vbinary/gen/vmin-scalar-x2.c
index f2fc1a1..d4ee3a4 100644
--- a/src/f32-vbinary/gen/vmin-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmin-scalar-x2.c
@@ -38,6 +38,7 @@
     float vy1 = math_min_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmin-scalar-x4.c b/src/f32-vbinary/gen/vmin-scalar-x4.c
index 2d454ea..71fc93c 100644
--- a/src/f32-vbinary/gen/vmin-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmin-scalar-x4.c
@@ -44,6 +44,7 @@
     float vy3 = math_min_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmin-sse-x4.c b/src/f32-vbinary/gen/vmin-sse-x4.c
index 50e7733..a27399e 100644
--- a/src/f32-vbinary/gen/vmin-sse-x4.c
+++ b/src/f32-vbinary/gen/vmin-sse-x4.c
@@ -37,6 +37,7 @@
     __m128 vy0123 = _mm_min_ps(va0123, vb0123);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmin-sse-x8.c b/src/f32-vbinary/gen/vmin-sse-x8.c
index e6fb094..500ed65 100644
--- a/src/f32-vbinary/gen/vmin-sse-x8.c
+++ b/src/f32-vbinary/gen/vmin-sse-x8.c
@@ -40,6 +40,7 @@
     __m128 vy4567 = _mm_min_ps(va4567, vb4567);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmin-wasm-x2.c b/src/f32-vbinary/gen/vmin-wasm-x2.c
index 79cacbd..92e467a 100644
--- a/src/f32-vbinary/gen/vmin-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmin-wasm-x2.c
@@ -38,6 +38,7 @@
     float vy1 = __builtin_wasm_min_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmin-wasm-x4.c b/src/f32-vbinary/gen/vmin-wasm-x4.c
index b63760a..75e23c9 100644
--- a/src/f32-vbinary/gen/vmin-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmin-wasm-x4.c
@@ -44,6 +44,7 @@
     float vy3 = __builtin_wasm_min_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vminc-avx-x16.c b/src/f32-vbinary/gen/vminc-avx-x16.c
index 4523f46..c15c0e3 100644
--- a/src/f32-vbinary/gen/vminc-avx-x16.c
+++ b/src/f32-vbinary/gen/vminc-avx-x16.c
@@ -38,6 +38,7 @@
     __m256 vy89ABCDEF = _mm256_min_ps(va89ABCDEF, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vminc-avx-x8.c b/src/f32-vbinary/gen/vminc-avx-x8.c
index 5f09206..8807026 100644
--- a/src/f32-vbinary/gen/vminc-avx-x8.c
+++ b/src/f32-vbinary/gen/vminc-avx-x8.c
@@ -36,6 +36,7 @@
     __m256 vy01234567 = _mm256_min_ps(va01234567, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vminc-avx512f-x16.c b/src/f32-vbinary/gen/vminc-avx512f-x16.c
index 29317f7..9f1bb8f 100644
--- a/src/f32-vbinary/gen/vminc-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vminc-avx512f-x16.c
@@ -35,6 +35,7 @@
     __m512 vy0123456789ABCDEF = _mm512_min_ps(va0123456789ABCDEF, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vminc-avx512f-x32.c b/src/f32-vbinary/gen/vminc-avx512f-x32.c
index dd313af..6a1ebb3 100644
--- a/src/f32-vbinary/gen/vminc-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vminc-avx512f-x32.c
@@ -37,6 +37,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_min_ps(vaGHIJKLMNOPQRSTUV, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vminc-neon-x4.c b/src/f32-vbinary/gen/vminc-neon-x4.c
index d0663bf..0ccc583 100644
--- a/src/f32-vbinary/gen/vminc-neon-x4.c
+++ b/src/f32-vbinary/gen/vminc-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vminq_f32(va0123, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vminc-neon-x8.c b/src/f32-vbinary/gen/vminc-neon-x8.c
index 3a6bcbc..517f13a 100644
--- a/src/f32-vbinary/gen/vminc-neon-x8.c
+++ b/src/f32-vbinary/gen/vminc-neon-x8.c
@@ -35,6 +35,7 @@
     float32x4_t vy4567 = vminq_f32(va4567, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vminc-psimd-x4.c b/src/f32-vbinary/gen/vminc-psimd-x4.c
index a5481d1..7133aba 100644
--- a/src/f32-vbinary/gen/vminc-psimd-x4.c
+++ b/src/f32-vbinary/gen/vminc-psimd-x4.c
@@ -34,6 +34,7 @@
     psimd_f32 vy0123 = psimd_min_f32(va0123, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vminc-psimd-x8.c b/src/f32-vbinary/gen/vminc-psimd-x8.c
index 92d9603..4adfb23 100644
--- a/src/f32-vbinary/gen/vminc-psimd-x8.c
+++ b/src/f32-vbinary/gen/vminc-psimd-x8.c
@@ -36,6 +36,7 @@
     psimd_f32 vy4567 = psimd_min_f32(va4567, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vminc-scalar-x2.c b/src/f32-vbinary/gen/vminc-scalar-x2.c
index 9118c94..cea44df 100644
--- a/src/f32-vbinary/gen/vminc-scalar-x2.c
+++ b/src/f32-vbinary/gen/vminc-scalar-x2.c
@@ -35,6 +35,7 @@
     float vy1 = math_min_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vminc-scalar-x4.c b/src/f32-vbinary/gen/vminc-scalar-x4.c
index e6fa215..2f2deda 100644
--- a/src/f32-vbinary/gen/vminc-scalar-x4.c
+++ b/src/f32-vbinary/gen/vminc-scalar-x4.c
@@ -39,6 +39,7 @@
     float vy3 = math_min_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vminc-sse-x4.c b/src/f32-vbinary/gen/vminc-sse-x4.c
index 48084b2..ca36369 100644
--- a/src/f32-vbinary/gen/vminc-sse-x4.c
+++ b/src/f32-vbinary/gen/vminc-sse-x4.c
@@ -35,6 +35,7 @@
     __m128 vy0123 = _mm_min_ps(va0123, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vminc-sse-x8.c b/src/f32-vbinary/gen/vminc-sse-x8.c
index 63f9bc5..7f4a11b 100644
--- a/src/f32-vbinary/gen/vminc-sse-x8.c
+++ b/src/f32-vbinary/gen/vminc-sse-x8.c
@@ -37,6 +37,7 @@
     __m128 vy4567 = _mm_min_ps(va4567, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vminc-wasm-x2.c b/src/f32-vbinary/gen/vminc-wasm-x2.c
index 35ded66..d50e60a 100644
--- a/src/f32-vbinary/gen/vminc-wasm-x2.c
+++ b/src/f32-vbinary/gen/vminc-wasm-x2.c
@@ -35,6 +35,7 @@
     float vy1 = __builtin_wasm_min_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vminc-wasm-x4.c b/src/f32-vbinary/gen/vminc-wasm-x4.c
index 3a24bb5..d8ac0ad 100644
--- a/src/f32-vbinary/gen/vminc-wasm-x4.c
+++ b/src/f32-vbinary/gen/vminc-wasm-x4.c
@@ -39,6 +39,7 @@
     float vy3 = __builtin_wasm_min_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx-x16.c b/src/f32-vbinary/gen/vmul-minmax-avx-x16.c
index 01660f8..7e2590d 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_mul_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx-x8.c b/src/f32-vbinary/gen/vmul-minmax-avx-x8.c
index 0c2cc91..ba42e00 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
index 209ebd1..de64190 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
index 3a047dc..1ccf9fb 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-neon-x4.c b/src/f32-vbinary/gen/vmul-minmax-neon-x4.c
index 869794e..818d3f5 100644
--- a/src/f32-vbinary/gen/vmul-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-neon-x8.c b/src/f32-vbinary/gen/vmul-minmax-neon-x8.c
index 65af2bc..73de3bf 100644
--- a/src/f32-vbinary/gen/vmul-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
     float32x4_t vy4567 = vmulq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c b/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
index 2bab626..860bfa4 100644
--- a/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c b/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
index 555495e..a0e0e0f 100644
--- a/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_mul_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c b/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
index 720ff1a..191611e 100644
--- a/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 * vb0;
     float vy1 = va1 * vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c b/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
index 1c48260..1d2e783 100644
--- a/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 * vb2;
     float vy3 = va3 * vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmul-minmax-sse-x4.c b/src/f32-vbinary/gen/vmul-minmax-sse-x4.c
index 6b1dca7..0f0dd69 100644
--- a/src/f32-vbinary/gen/vmul-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_mul_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-sse-x8.c b/src/f32-vbinary/gen/vmul-minmax-sse-x8.c
index dbbdfbd..6099c87 100644
--- a/src/f32-vbinary/gen/vmul-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_mul_ps(va0123, vb0123);
     __m128 vy4567 = _mm_mul_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c b/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
index 85e1075..1891226 100644
--- a/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 * vb0;
     float vy1 = va1 * vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c b/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
index f71344c..0731aa7 100644
--- a/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 * vb2;
     float vy3 = va3 * vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c b/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
index a054201..29cc7b4 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_mul_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c b/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
index 07692ff..c32e846 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
index 7c91298..708e1e3 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
index 0f77c38..dca652f 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c b/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
index 01121e7..93de31d 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vmulq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c b/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
index 6608408..8693b14 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vmulq_f32(va0123, vb);
     float32x4_t vy4567 = vmulq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
index ea190b3..94c46bc 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
index 15c0f2b..f58446b 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_mul_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
index 31dc83f..fe53df5 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 * vb;
     float vy1 = va1 * vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
index dfef131..24e4bcf 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 * vb;
     float vy3 = va3 * vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c b/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
index 995cf45..294bc58 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_mul_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c b/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
index dbc41a9..de42813 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_mul_ps(va0123, vb);
     __m128 vy4567 = _mm_mul_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
index f76e59f..4d8d071 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 * vb;
     float vy1 = va1 * vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
index 6f8be74..5281c86 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 * vb;
     float vy3 = va3 * vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c b/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
index 3be3723..47c7e79 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_div_ps(vb, va01234567);
     __m256 vy89ABCDEF = _mm256_div_ps(vb, va89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
index c7ae348..3e18136 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_div_ps(vb, va01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
index 5216232..1a9520a 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_div_ps(vb, va0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
index c5eb881..65acd81 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_div_ps(vb, va0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_div_ps(vb, vaGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
index f0cb985..7ffa485 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vdivq_f32(vb, va0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
index 90dc55e..bb5e055 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vdivq_f32(vb, va0123);
     float32x4_t vy4567 = vdivq_f32(vb, va4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
index 257663b..2b939f2 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_div_f32(vb, va0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
index fbfab1e..1f675a5 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_div_f32(vb, va0123);
     psimd_f32 vy4567 = psimd_div_f32(vb, va4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
index 0c53873..f3e82c6 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb / va0;
     float vy1 = vb / va1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
index e27237f..8cc8737 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb / va2;
     float vy3 = vb / va3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
index 3db24ff..dfb4490 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_div_ps(vb, va0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
index ae22169..7e72a02 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_div_ps(vb, va0123);
     __m128 vy4567 = _mm_div_ps(vb, va4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
index 6d2dc38..a9f70f6 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb / va0;
     float vy1 = vb / va1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
index e19c0e8..5b689e3 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb / va2;
     float vy3 = vb / va3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c b/src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c
new file mode 100644
index 0000000..426991a
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c
@@ -0,0 +1,83 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vrsqrdiffc_ukernel__avx_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
+    a += 16;
+
+    __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
+    __m256 vy89ABCDEF = _mm256_sub_ps(vb, va89ABCDEF);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vy89ABCDEF);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    _mm256_storeu_ps(y + 8, vy89ABCDEF);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
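
The tail handling in this and the other AVX kernels is worth unpacking: mask_table holds seven -1 words followed by seven 0 words, so loading eight consecutive words starting n bytes before &mask_table[7] (i.e. at index 7 - n/sizeof(float)) yields exactly n/sizeof(float) all-ones lanes for _mm256_maskload_ps. A standalone check of that indexing, with the table copied from the kernel above:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

int main(void) {
  /* For a tail of k = n/sizeof(float) elements (1 <= k <= 7), the kernel
   * loads the 8-word window starting at index 7 - k: k ones, then zeros. */
  for (size_t k = 1; k <= 7; k++) {
    const int32_t* window = &mask_table[7 - k];
    printf("k=%zu lanes:", k);
    for (size_t lane = 0; lane < 8; lane++) {
      printf(" %c", window[lane] ? '1' : '0');
    }
    printf("\n");
  }
  return 0;
}
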
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c
new file mode 100644
index 0000000..19d7d6d
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c
@@ -0,0 +1,79 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vrsqrdiffc_ukernel__avx_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    y += 8;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c
new file mode 100644
index 0000000..60c84d9
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__avx512f_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    y += 16;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
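
Two notes on the AVX512 variants. First, the second n >= 16 * sizeof(float) loop in the x16 kernel above is dead code: the main loop has the same bound and drains all such iterations first, so the duplicate never executes — an artifact of the generator emitting the same main/cleanup skeleton for every tile size, and one the compiler discards. Second, the tail needs no mask table: with k = n >> 2 floats left, (UINT32_C(1) << k) - 1 sets the low k bits of an __mmask16 for the predicated load and store. A quick sanity check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* With k remaining floats (1 <= k <= 15), (1 << k) - 1 has exactly the
   * low k of the 16 mask bits set. */
  for (uint32_t k = 1; k <= 15; k++) {
    const uint16_t mask = (uint16_t) ((UINT32_C(1) << k) - UINT32_C(1));
    printf("k=%2u mask=0x%04x\n", (unsigned) k, (unsigned) mask);
  }
  return 0;
}
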
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c
new file mode 100644
index 0000000..70f96f1
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__avx512f_x32(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    const __m512 vaGHIJKLMNOPQRSTUV = _mm512_loadu_ps(a + 16);
+    a += 32;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
+    __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vb, vaGHIJKLMNOPQRSTUV);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vyGHIJKLMNOPQRSTUV);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c
new file mode 100644
index 0000000..2c87ff5
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__neon_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
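
Unlike the x86 kernels, the NEON tail above issues a full vld1q_f32 even when only one to three floats remain; the over-read is presumably covered by XNNPACK's convention of allocating operand buffers with padding past the logical end (an assumption about the caller contract, not something this file states). The store side is exact: the remaining k < 4 results are written as an optional pair plus an optional single lane, the same decomposition as this scalar sketch:

#include <stdio.h>
#include <string.h>

/* Scalar sketch of the NEON tail-store decomposition: write the first
 * k (< 4) values of a 4-element temporary without touching y[k..3].
 * The kernel does the same with vst1_f32 (pair) and vst1_lane_f32 (one). */
static void store_tail(float* y, const float v[4], unsigned k) {
  unsigned i = 0;
  if (k & 2) {  /* like: vst1_f32(y, vy01); vy01 = vget_high_f32(vy0123) */
    memcpy(y, &v[i], 2 * sizeof(float));
    y += 2;
    i += 2;
  }
  if (k & 1) {  /* like: vst1_lane_f32(y, vy01, 0) */
    *y = v[i];
  }
}

int main(void) {
  const float v[4] = {10.0f, 20.0f, 30.0f, 40.0f};
  for (unsigned k = 1; k <= 3; k++) {
    float y[4] = {-1.0f, -1.0f, -1.0f, -1.0f};
    store_tail(y, v, k);
    printf("k=%u -> %g %g %g %g\n", k, y[0], y[1], y[2], y[3]);
  }
  return 0;
}
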
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c
new file mode 100644
index 0000000..43304d6
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__neon_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t va4567 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    float32x4_t vy4567 = vsubq_f32(vb, va4567);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vy4567 = vmulq_f32(vy4567, vy4567);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c
new file mode 100644
index 0000000..50eab11
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__psimd_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+
+
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c
new file mode 100644
index 0000000..837f30a
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__psimd_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 va4567 = psimd_load_f32(a + 4);
+    a += 8;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    psimd_f32 vy4567 = psimd_sub_f32(vb, va4567);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    vy4567 = psimd_mul_f32(vy4567, vy4567);
+
+
+    psimd_store_f32(y, vy0123);
+    psimd_store_f32(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c
new file mode 100644
index 0000000..e26adbe
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__scalar_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
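
The scalar x1 kernel above doubles as the reference semantics for the whole RSQRDIFFC family: y[i] = (b - a[i])^2 with a single broadcast scalar b (the wasm variants further down are byte-for-byte identical, since this operator needs no __builtin_wasm_* min/max calls). A self-contained equivalent with a small usage check — rsqrdiffc_ref is an illustrative name, not an XNNPACK entry point:

#include <stddef.h>
#include <stdio.h>

/* Standalone equivalent of the scalar RSQRDIFFC kernel: y[i] = (b - a[i])^2. */
static void rsqrdiffc_ref(size_t n, const float* a, float b, float* y) {
  for (size_t i = 0; i < n; i++) {
    const float d = b - a[i];
    y[i] = d * d;
  }
}

int main(void) {
  const float a[5] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f};
  float y[5];
  rsqrdiffc_ref(5, a, 2.5f, y);
  for (int i = 0; i < 5; i++) {
    printf("%g ", y[i]);  /* 6.25 2.25 0.25 0.25 2.25 */
  }
  printf("\n");
  return 0;
}
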
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c
new file mode 100644
index 0000000..5363ed9
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__scalar_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c
new file mode 100644
index 0000000..ad61bc6
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__scalar_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+    float vy2 = vb - va2;
+    float vy3 = vb - va3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = vb - va;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c
new file mode 100644
index 0000000..bae4f27
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__sse_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+
+
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c
new file mode 100644
index 0000000..9127dd5
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c
@@ -0,0 +1,70 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__sse_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 va4567 = _mm_loadu_ps(a + 4);
+    a += 8;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    __m128 vy4567 = _mm_sub_ps(vb, va4567);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    vy4567 = _mm_mul_ps(vy4567, vy4567);
+
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c
new file mode 100644
index 0000000..b439180
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__wasm_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c
new file mode 100644
index 0000000..62a4e1f
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__wasm_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c
new file mode 100644
index 0000000..dc6d893
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__wasm_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+    float vy2 = vb - va2;
+    float vy3 = vb - va3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = vb - va;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
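
One property worth spelling out before the two-input VSQRDIFF files below: squared difference is commutative even in IEEE 754 arithmetic. b - a is the exact negation of a - b (rounding is symmetric under sign), and x*x equals (-x)*(-x), so every VRSQRDIFFC kernel in this change produces bit-identical results to its VSQRDIFFC counterpart; the reversed variants exist because the generator uniformly emits an op/opc/reversed-opc triple for each binary operator. A quick check:

#include <assert.h>
#include <stdio.h>

int main(void) {
  const float a = 3.1f;
  const float b = -7.25f;
  const float d1 = (a - b) * (a - b);  /* SQRDIFFC order */
  const float d2 = (b - a) * (b - a);  /* RSQRDIFFC order */
  assert(d1 == d2);
  printf("sqrdiff(%g, %g) = %g\n", a, b, d1);
  return 0;
}
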
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c b/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
index 5789c9c..4e73860 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
     __m256 vy89ABCDEF = _mm256_sub_ps(vb, va89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
index 067ea70..7b91d98 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
index ce4f27b..0b9775d 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
index a160607..b4c7d05 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vb, vaGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
index 5940782..9f3f79f 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vsubq_f32(vb, va0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
index eaae8da..e68b6cb 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vsubq_f32(vb, va0123);
     float32x4_t vy4567 = vsubq_f32(vb, va4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
index b59870d..febe35c 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
index f67b850..f57dbf5 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
     psimd_f32 vy4567 = psimd_sub_f32(vb, va4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
index e06a755..a6189df 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb - va0;
     float vy1 = vb - va1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
index 6a999c1..64afb58 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb - va2;
     float vy3 = vb - va3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
index 7d03760..4647adb 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_sub_ps(vb, va0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
index bdafcf9..853faa6 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_sub_ps(vb, va0123);
     __m128 vy4567 = _mm_sub_ps(vb, va4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
index e58d2f1..c66ae81 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb - va0;
     float vy1 = vb - va1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
index c69c285..5b2dfec 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb - va2;
     float vy3 = vb - va3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx-x16.c b/src/f32-vbinary/gen/vsqrdiff-avx-x16.c
new file mode 100644
index 0000000..d29cfec
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx-x16.c
@@ -0,0 +1,90 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiff_ukernel__avx_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
+    a += 16;
+
+    const __m256 vb01234567 = _mm256_loadu_ps(b);
+    const __m256 vb89ABCDEF = _mm256_loadu_ps(b + 8);
+    b += 16;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
+    __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb89ABCDEF);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vy89ABCDEF);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    _mm256_storeu_ps(y + 8, vy89ABCDEF);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    const __m256 vb = _mm256_loadu_ps(b);
+    b += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+    const __m256 vb = _mm256_maskload_ps(b, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
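
The two-input VSQRDIFF kernels (this file and the ones that follow) differ from the *C variants only in that b is a full elementwise stream: both pointers advance, and in the tail the same mask load is applied to a and b. Their shared semantics as a scalar sketch (illustrative, not generated code):

#include <stddef.h>

/* Reference semantics of the VSQRDIFF kernels: y[i] = (a[i] - b[i])^2,
 * with both operands read elementwise. The generated kernels vectorize
 * and unroll this loop. */
void sqrdiff_ref(size_t n, const float* a, const float* b, float* y) {
  for (size_t i = 0; i < n; i++) {
    const float d = a[i] - b[i];
    y[i] = d * d;
  }
}
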
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx-x8.c b/src/f32-vbinary/gen/vsqrdiff-avx-x8.c
new file mode 100644
index 0000000..c2deaf0
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx-x8.c
@@ -0,0 +1,85 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiff_ukernel__avx_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    a += 8;
+
+    const __m256 vb01234567 = _mm256_loadu_ps(b);
+    b += 8;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    y += 8;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    const __m256 vb = _mm256_loadu_ps(b);
+    b += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+    const __m256 vb = _mm256_maskload_ps(b, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c b/src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c
new file mode 100644
index 0000000..93b109f
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c
@@ -0,0 +1,71 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__avx512f_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    a += 16;
+
+    const __m512 vb0123456789ABCDEF = _mm512_loadu_ps(b);
+    b += 16;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    y += 16;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    const __m512 vb = _mm512_loadu_ps(b);
+    b += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
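
Note: the AVX512 kernels need no mask table. The tail converts the
remaining byte count into an element count and materializes a __mmask16
with the low bits set, which drives the masked loads and the masked
store directly. The same computation in plain C, as a sketch
(tail_mask16 is a hypothetical name, not an XNNPACK function):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// For a remainder of 1..15 floats, set the low `elements` mask bits.
static uint16_t tail_mask16(size_t bytes) {
  assert(bytes >= 1 * sizeof(float));
  assert(bytes <= 15 * sizeof(float));
  const size_t elements = bytes >> 2;  // log2(sizeof(float)) == 2
  return (uint16_t) ((UINT32_C(1) << elements) - UINT32_C(1));
}
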
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c b/src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c
new file mode 100644
index 0000000..c232e68
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c
@@ -0,0 +1,76 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__avx512f_x32(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    const __m512 vaGHIJKLMNOPQRSTUV = _mm512_loadu_ps(a + 16);
+    a += 32;
+
+    const __m512 vb0123456789ABCDEF = _mm512_loadu_ps(b);
+    const __m512 vbGHIJKLMNOPQRSTUV = _mm512_loadu_ps(b + 16);
+    b += 32;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
+    __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vyGHIJKLMNOPQRSTUV);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    const __m512 vb = _mm512_loadu_ps(b);
+    b += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-neon-x4.c b/src/f32-vbinary/gen/vsqrdiff-neon-x4.c
new file mode 100644
index 0000000..9f28346
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-neon-x4.c
@@ -0,0 +1,64 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__neon_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+    const float32x4_t vb0123 = vld1q_f32(b);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
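
Note: the NEON remainder path computes a full 4-lane result and then
peels it into memory: a 64-bit store covers two leftover elements and a
single-lane store covers one. The same store sequence as a standalone
helper (store_tail_f32x4 is an illustrative name, not an XNNPACK
function):

#include <stddef.h>

#include <arm_neon.h>

// Writes the low `count` (1..3) lanes of v to y without storing past
// y[count - 1]; mirrors the tail of the kernel above.
static void store_tail_f32x4(float* y, float32x4_t v, size_t count) {
  float32x2_t lo = vget_low_f32(v);
  if (count & 2) {
    vst1_f32(y, lo);          // lanes 0..1
    lo = vget_high_f32(v);    // lanes 2..3 become the next low pair
    y += 2;
  }
  if (count & 1) {
    vst1_lane_f32(y, lo, 0);  // one remaining lane
  }
}
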
diff --git a/src/f32-vbinary/gen/vsqrdiff-neon-x8.c b/src/f32-vbinary/gen/vsqrdiff-neon-x8.c
new file mode 100644
index 0000000..bda32d1
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-neon-x8.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__neon_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+    const float32x4_t va4567 = vld1q_f32(a); a += 4;
+    const float32x4_t vb4567 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    float32x4_t vy4567 = vsubq_f32(va4567, vb4567);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vy4567 = vmulq_f32(vy4567, vy4567);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+    const float32x4_t vb0123 = vld1q_f32(b);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-psimd-x4.c b/src/f32-vbinary/gen/vsqrdiff-psimd-x4.c
new file mode 100644
index 0000000..be4f52d
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-psimd-x4.c
@@ -0,0 +1,71 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__psimd_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    b += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+
+
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    b += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-psimd-x8.c b/src/f32-vbinary/gen/vsqrdiff-psimd-x8.c
new file mode 100644
index 0000000..7910349
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-psimd-x8.c
@@ -0,0 +1,76 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__psimd_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 va4567 = psimd_load_f32(a + 4);
+    a += 8;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    const psimd_f32 vb4567 = psimd_load_f32(b + 4);
+    b += 8;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    psimd_f32 vy4567 = psimd_sub_f32(va4567, vb4567);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    vy4567 = psimd_mul_f32(vy4567, vy4567);
+
+
+    psimd_store_f32(y, vy0123);
+    psimd_store_f32(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    b += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-scalar-x1.c b/src/f32-vbinary/gen/vsqrdiff-scalar-x1.c
new file mode 100644
index 0000000..9f8ff4e
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-scalar-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__scalar_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    const float vb = *b++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
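
Note: the scalar x1 kernel doubles as a readable reference for the
operation: y[i] = (a[i] - b[i])^2, with no minmax clamping. A hedged
sketch of calling the ukernel directly (in practice XNNPACK dispatches
to these kernels through its operator API; the buffers and values here
are arbitrary, and the params union is declared only to satisfy the
signature, since this kernel never reads it):

#include <stdio.h>

#include <xnnpack/vbinary.h>

int main(void) {
  const float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  const float b[4] = {4.0f, 2.0f, 0.0f, -1.0f};
  float y[4];
  union xnn_f32_default_params params;  // not read by this kernel
  // Note: the first argument is a byte count, not an element count.
  xnn_f32_vsqrdiff_ukernel__scalar_x1(4 * sizeof(float), a, b, y, &params);
  for (int i = 0; i < 4; i++) {
    printf("y[%d] = %.1f\n", i, y[i]);  // 9.0, 0.0, 9.0, 25.0
  }
  return 0;
}
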
diff --git a/src/f32-vbinary/gen/vsqrdiff-scalar-x2.c b/src/f32-vbinary/gen/vsqrdiff-scalar-x2.c
new file mode 100644
index 0000000..ab47261
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-scalar-x2.c
@@ -0,0 +1,55 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__scalar_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    b += 2;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    const float vb = *b;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-scalar-x4.c b/src/f32-vbinary/gen/vsqrdiff-scalar-x4.c
new file mode 100644
index 0000000..e850af8
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-scalar-x4.c
@@ -0,0 +1,68 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__scalar_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    const float vb2 = b[2];
+    const float vb3 = b[3];
+    b += 4;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+    float vy2 = va2 - vb2;
+    float vy3 = va3 - vb3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      const float vb = *b++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-sse-x4.c b/src/f32-vbinary/gen/vsqrdiff-sse-x4.c
new file mode 100644
index 0000000..5904bcb
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-sse-x4.c
@@ -0,0 +1,72 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__sse_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    b += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+
+
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    b += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 vb0123 = _mm_loadu_ps(b);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-sse-x8.c b/src/f32-vbinary/gen/vsqrdiff-sse-x8.c
new file mode 100644
index 0000000..ca6ab69
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-sse-x8.c
@@ -0,0 +1,77 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__sse_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 va4567 = _mm_loadu_ps(a + 4);
+    a += 8;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    const __m128 vb4567 = _mm_loadu_ps(b + 4);
+    b += 8;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    __m128 vy4567 = _mm_sub_ps(va4567, vb4567);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    vy4567 = _mm_mul_ps(vy4567, vy4567);
+
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    b += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 vb0123 = _mm_loadu_ps(b);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
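
Note: the SSE kernels peel the remainder the same way as NEON, using
_mm_storel_pi for a leftover pair and _mm_store_ss for a final element,
with _mm_movehl_ps shifting the upper lanes down in between. As a
standalone sketch (store_tail_f32 is an illustrative name, not an
XNNPACK function):

#include <stddef.h>

#include <xmmintrin.h>

// Writes the low `count` (1..3) lanes of v to y; mirrors the tail above.
static void store_tail_f32(float* y, __m128 v, size_t count) {
  if (count & 2) {
    _mm_storel_pi((__m64*) y, v);  // lanes 0..1 via a 64-bit store
    v = _mm_movehl_ps(v, v);       // move lanes 2..3 into the low half
    y += 2;
  }
  if (count & 1) {
    _mm_store_ss(y, v);            // one remaining lane
  }
}
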
diff --git a/src/f32-vbinary/gen/vsqrdiff-wasm-x1.c b/src/f32-vbinary/gen/vsqrdiff-wasm-x1.c
new file mode 100644
index 0000000..7f0f293
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-wasm-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__wasm_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    const float vb = *b++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-wasm-x2.c b/src/f32-vbinary/gen/vsqrdiff-wasm-x2.c
new file mode 100644
index 0000000..d5103ec
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-wasm-x2.c
@@ -0,0 +1,55 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__wasm_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    b += 2;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    const float vb = *b;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-wasm-x4.c b/src/f32-vbinary/gen/vsqrdiff-wasm-x4.c
new file mode 100644
index 0000000..a17b560
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-wasm-x4.c
@@ -0,0 +1,68 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__wasm_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    const float vb2 = b[2];
+    const float vb3 = b[3];
+    b += 4;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+    float vy2 = va2 - vb2;
+    float vy3 = va3 - vb3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      const float vb = *b++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
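
Note: the vsqrdiffc ("c" for constant) kernels that follow differ from
vsqrdiff only in their second operand: b points to a single scalar that
is broadcast once before the loop, so y[i] = (a[i] - b[0])^2. Because
squaring makes the difference symmetric, a reversed variant would
compute identical results. A plain-C reference of the semantics
(sqrdiffc_ref is a hypothetical name, not an XNNPACK function):

#include <stddef.h>

// Reference semantics for the constant-operand variant:
//   y[i] = (a[i] - b[0]) * (a[i] - b[0])
static void sqrdiffc_ref(size_t elements, const float* a, const float* b, float* y) {
  const float vb = *b;
  for (size_t i = 0; i < elements; i++) {
    const float d = a[i] - vb;
    y[i] = d * d;
  }
}
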
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx-x16.c b/src/f32-vbinary/gen/vsqrdiffc-avx-x16.c
new file mode 100644
index 0000000..636766f
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx-x16.c
@@ -0,0 +1,83 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiffc_ukernel__avx_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
+    a += 16;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
+    __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vy89ABCDEF);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    _mm256_storeu_ps(y + 8, vy89ABCDEF);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but it triggers msan failures (likely an msan false positive: msan does not model masked stores).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx-x8.c b/src/f32-vbinary/gen/vsqrdiffc-avx-x8.c
new file mode 100644
index 0000000..2e9bdab
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx-x8.c
@@ -0,0 +1,79 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiffc_ukernel__avx_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    y += 8;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but it triggers msan failures (likely an msan false positive: msan does not model masked stores).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c
new file mode 100644
index 0000000..f43d9d8
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__avx512f_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    y += 16;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c
new file mode 100644
index 0000000..487632f
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__avx512f_x32(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    const __m512 vaGHIJKLMNOPQRSTUV = _mm512_loadu_ps(a + 16);
+    a += 32;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
+    __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vb);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vyGHIJKLMNOPQRSTUV);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-neon-x4.c b/src/f32-vbinary/gen/vsqrdiffc-neon-x4.c
new file mode 100644
index 0000000..aa1c257
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-neon-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__neon_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-neon-x8.c b/src/f32-vbinary/gen/vsqrdiffc-neon-x8.c
new file mode 100644
index 0000000..da2f1dc
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-neon-x8.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__neon_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t va4567 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    float32x4_t vy4567 = vsubq_f32(va4567, vb);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vy4567 = vmulq_f32(vy4567, vy4567);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c b/src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c
new file mode 100644
index 0000000..8a2a174
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__psimd_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+
+
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c b/src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c
new file mode 100644
index 0000000..d47eeea
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__psimd_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 va4567 = psimd_load_f32(a + 4);
+    a += 8;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    psimd_f32 vy4567 = psimd_sub_f32(va4567, vb);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    vy4567 = psimd_mul_f32(vy4567, vy4567);
+
+
+    psimd_store_f32(y, vy0123);
+    psimd_store_f32(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c b/src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c
new file mode 100644
index 0000000..7a88d0a
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__scalar_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c b/src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c
new file mode 100644
index 0000000..63771e5
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__scalar_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c b/src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c
new file mode 100644
index 0000000..9fcc8dc
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__scalar_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+    float vy2 = va2 - vb;
+    float vy3 = va3 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-sse-x4.c b/src/f32-vbinary/gen/vsqrdiffc-sse-x4.c
new file mode 100644
index 0000000..24bdde6
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-sse-x4.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__sse_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+
+
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-sse-x8.c b/src/f32-vbinary/gen/vsqrdiffc-sse-x8.c
new file mode 100644
index 0000000..43c1eb0
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-sse-x8.c
@@ -0,0 +1,70 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__sse_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 va4567 = _mm_loadu_ps(a + 4);
+    a += 8;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    __m128 vy4567 = _mm_sub_ps(va4567, vb);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    vy4567 = _mm_mul_ps(vy4567, vy4567);
+
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c b/src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c
new file mode 100644
index 0000000..abaa0f8
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__wasm_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c b/src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c
new file mode 100644
index 0000000..415d9be
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__wasm_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c b/src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c
new file mode 100644
index 0000000..07fdf22
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__wasm_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+    float vy2 = va2 - vb;
+    float vy3 = va3 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
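
Note: the remaining hunks touch the existing vsub/vsubc minmax kernels
only to insert a blank line between the subtraction and the clamping
steps. The shared vop templates evidently now reserve a slot there for
an optional post-op (the squaring step in the SQRDIFF kernels above),
and regenerating the untouched kernels emits that slot as an empty line.
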
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx-x16.c b/src/f32-vbinary/gen/vsub-minmax-avx-x16.c
index ff72f26..fd9aee3 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx-x8.c b/src/f32-vbinary/gen/vsub-minmax-avx-x8.c
index 7504d94..515afd7 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
index 2bf7b7f..36d84ec 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
index 8311cb7..ed289c6 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-neon-x4.c b/src/f32-vbinary/gen/vsub-minmax-neon-x4.c
index 4f9b839..62a9564 100644
--- a/src/f32-vbinary/gen/vsub-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-neon-x8.c b/src/f32-vbinary/gen/vsub-minmax-neon-x8.c
index 3f3ca44..a5b4ada 100644
--- a/src/f32-vbinary/gen/vsub-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
     float32x4_t vy4567 = vsubq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c b/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
index 445d44a..1065e95 100644
--- a/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c b/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
index 07e8171..f1931ec 100644
--- a/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_sub_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c b/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
index 27c3d69..16cdb9d 100644
--- a/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 - vb0;
     float vy1 = va1 - vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c b/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
index 33c0b97..81e52d0 100644
--- a/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 - vb2;
     float vy3 = va3 - vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsub-minmax-sse-x4.c b/src/f32-vbinary/gen/vsub-minmax-sse-x4.c
index f61af92..9509137 100644
--- a/src/f32-vbinary/gen/vsub-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-sse-x8.c b/src/f32-vbinary/gen/vsub-minmax-sse-x8.c
index 8995fca..83d9ed2 100644
--- a/src/f32-vbinary/gen/vsub-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
     __m128 vy4567 = _mm_sub_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c b/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
index 6ac500c..eb66d63 100644
--- a/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 - vb0;
     float vy1 = va1 - vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c b/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
index ddedd13..167c5d6 100644
--- a/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 - vb2;
     float vy3 = va3 - vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c b/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
index dc876a7..7b5800c 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c b/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
index 76e973a..30d34cf 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
index 003eef5..cc1553c 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
index d7db19c..5aa88d5 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c b/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
index 88afb06..0b44608 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vsubq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c b/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
index 112e8f6..3c8f247 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vsubq_f32(va0123, vb);
     float32x4_t vy4567 = vsubq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
index 1816d8f..b583755 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
index 014e32d..6633b80 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_sub_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
index 393eeb1..16b8ca3 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 - vb;
     float vy1 = va1 - vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
index b8bde24..589ae49 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 - vb;
     float vy3 = va3 - vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c b/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
index d57eeb6..62611f4 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_sub_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c b/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
index 39f4da1..735102b 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_sub_ps(va0123, vb);
     __m128 vy4567 = _mm_sub_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
index c219eca..c7463d6 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 - vb;
     float vy1 = va1 - vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
index 21c60b4..1970ca9 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 - vb;
     float vy3 = va3 - vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
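Note: the lone `+` blank lines in the regenerated vsub/vsubc kernels above carry no code change. They fall out of the template edits that follow: each template gains a `$if OP == "SQRDIFF":` squaring block followed by a blank line, and for every other op the block renders empty, leaving just that blank line behind in the generated sources.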
diff --git a/src/f32-vbinary/vop-avx.c.in b/src/f32-vbinary/vop-avx.c.in
index 7863bec..b09f80f 100644
--- a/src/f32-vbinary/vop-avx.c.in
+++ b/src/f32-vbinary/vop-avx.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,7 @@
 $  "MIN": lambda x, y: "_mm256_min_ps(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "_mm256_mul_ps(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "_mm256_sub_ps(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "_mm256_sub_ps(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -56,6 +57,10 @@
     $for N in range(0, BATCH_TILE, 8):
       __m256 vy${ABC[N:N+8]} = ${_MM256_OP_PS("va" + ABC[N:N+8], "vb" + ABC[N:N+8])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 8):
+        vy${ABC[N:N+8]} = _mm256_mul_ps(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 8):
         vy${ABC[N:N+8]} = _mm256_max_ps(vy${ABC[N:N+8]}, vy_min);
@@ -77,6 +82,8 @@
       b += 8;
 
       __m256 vy = ${_MM256_OP_PS("va", "vb")};
+      $if OP == "SQRDIFF":
+        vy = _mm256_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm256_max_ps(vy, vy_min);
         vy = _mm256_min_ps(vy, vy_max);
@@ -92,6 +99,8 @@
     const __m256 vb = _mm256_maskload_ps(b, vmask);
 
     __m256 vy = ${_MM256_OP_PS("va", "vb")};
+    $if OP == "SQRDIFF":
+      vy = _mm256_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm256_max_ps(vy, vy_min);
       vy = _mm256_min_ps(vy, vy_max);
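Across all ISAs the SQRDIFF lowering is the same two-step sequence: reuse the SUB expression, then square the result in place. A minimal sketch of the main loop this AVX template generates, under stated assumptions (hypothetical wrapper name; the params argument and the _mm256_maskload_ps remainder path are elided):

#include <immintrin.h>
#include <stddef.h>

// Sketch of the generated f32 SQRDIFF AVX x8 main loop; n counts bytes,
// matching the convention visible in the scalar template below.
void f32_vsqrdiff_avx_x8_sketch(size_t n, const float* a, const float* b, float* y)
{
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 va = _mm256_loadu_ps(a); a += 8;
    const __m256 vb = _mm256_loadu_ps(b); b += 8;
    __m256 vy = _mm256_sub_ps(va, vb);  // d = a - b
    vy = _mm256_mul_ps(vy, vy);         // y = d * d
    _mm256_storeu_ps(y, vy); y += 8;
  }
}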
diff --git a/src/f32-vbinary/vop-avx512f.c.in b/src/f32-vbinary/vop-avx512f.c.in
index f36b303..cafb5ec 100644
--- a/src/f32-vbinary/vop-avx512f.c.in
+++ b/src/f32-vbinary/vop-avx512f.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 16 == 0
 $assert BATCH_TILE >= 16
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -24,6 +24,7 @@
 $  "MIN": lambda x, y: "_mm512_min_ps(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "_mm512_mul_ps(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -55,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 16):
       __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16], "vb" + ABC[N:N+16])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 16):
+        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vy${ABC[N:N+16]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 16):
         vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);
@@ -76,6 +81,8 @@
       b += 16;
 
       __m512 vy = ${_MM512_OP_PS("va", "vb")};
+      $if OP == "SQRDIFF":
+        vy = _mm512_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm512_max_ps(vy, vy_min);
         vy = _mm512_min_ps(vy, vy_max);
@@ -93,6 +100,8 @@
     const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
 
     __m512 vy = ${_MM512_OP_PS("va", "vb")};
+    $if OP == "SQRDIFF":
+      vy = _mm512_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm512_max_ps(vy, vy_min);
       vy = _mm512_min_ps(vy, vy_max);
diff --git a/src/f32-vbinary/vop-neon.c.in b/src/f32-vbinary/vop-neon.c.in
index f427fdb..4a20103 100644
--- a/src/f32-vbinary/vop-neon.c.in
+++ b/src/f32-vbinary/vop-neon.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "vminq_f32(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "vmulq_f32(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "vsubq_f32(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "vsubq_f32(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -48,6 +49,10 @@
     $for N in range(0, BATCH_TILE, 4):
       float32x4_t vy${ABC[N:N+4]} = ${VOPQ_F32("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = vmulq_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = vmaxq_f32(vy${ABC[N:N+4]}, vy_min);
@@ -64,6 +69,8 @@
       const float32x4_t vb0123 = vld1q_f32(b); b += 4;
 
       float32x4_t vy0123 = ${VOPQ_F32("va0123", "vb0123")};
+      $if OP == "SQRDIFF":
+        vy0123 = vmulq_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = vmaxq_f32(vy0123, vy_min);
         vy0123 = vminq_f32(vy0123, vy_max);
@@ -74,6 +81,8 @@
     const float32x4_t vb0123 = vld1q_f32(b);
 
     float32x4_t vy0123 = ${VOPQ_F32("va0123", "vb0123")};
+    $if OP == "SQRDIFF":
+      vy0123 = vmulq_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = vmaxq_f32(vy0123, vy_min);
       vy0123 = vminq_f32(vy0123, vy_max);
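The NEON template emits the equivalent vsubq_f32/vmulq_f32 pair. A sketch under the same assumptions (hypothetical name; tail handling and params elided):

#include <arm_neon.h>
#include <stddef.h>

// Sketch of the generated f32 SQRDIFF NEON x4 main loop (n in bytes).
void f32_vsqrdiff_neon_x4_sketch(size_t n, const float* a, const float* b, float* y)
{
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(a); a += 4;
    const float32x4_t vb = vld1q_f32(b); b += 4;
    float32x4_t vy = vsubq_f32(va, vb);
    vy = vmulq_f32(vy, vy);  // the SQRDIFF-only squaring step
    vst1q_f32(y, vy); y += 4;
  }
}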
diff --git a/src/f32-vbinary/vop-psimd.c.in b/src/f32-vbinary/vop-psimd.c.in
index f1640f6..ffd03f2 100644
--- a/src/f32-vbinary/vop-psimd.c.in
+++ b/src/f32-vbinary/vop-psimd.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "psimd_min_f32(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "psimd_mul_f32(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "psimd_sub_f32(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "psimd_sub_f32(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -54,6 +55,10 @@
     $for N in range(0, BATCH_TILE, 4):
       psimd_f32 vy${ABC[N:N+4]} = ${PSIMD_OP_F32("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = psimd_mul_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = psimd_max_f32(vy${ABC[N:N+4]}, vy_min);
@@ -75,6 +80,8 @@
       b += 4;
 
       psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123", "vb0123")};
+      $if OP == "SQRDIFF":
+        vy0123 = psimd_mul_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = psimd_max_f32(vy0123, vy_min);
         vy0123 = psimd_min_f32(vy0123, vy_max);
@@ -86,6 +93,8 @@
     const psimd_f32 vb0123 = psimd_load_f32(b);
 
     psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123", "vb0123")};
+    $if OP == "SQRDIFF":
+      vy0123 = psimd_mul_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = psimd_max_f32(vy0123, vy_min);
       vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/vop-scalar.c.in b/src/f32-vbinary/vop-scalar.c.in
index dabe3a0..85696ac 100644
--- a/src/f32-vbinary/vop-scalar.c.in
+++ b/src/f32-vbinary/vop-scalar.c.in
@@ -5,7 +5,7 @@
 
 $assert BATCH_TILE >= 1
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "%s(%s, %s)" % (MIN_F32, x, y),
 $  "MUL": lambda x, y: "%s * %s" % (x, y),
 $  "SUB": lambda x, y: "%s - %s" % (x, y),
+$  "SQRDIFF": lambda x, y: "%s - %s" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -53,6 +54,10 @@
       $for N in range(BATCH_TILE):
         float vy${ABC[N]} = ${OP_FUNC("va" + ABC[N], "vb" + ABC[N])};
 
+      $if OP == "SQRDIFF":
+        $for N in range(BATCH_TILE):
+          vy${ABC[N]} = vy${ABC[N]} * vy${ABC[N]};
+
       $if ACTIVATION == "MINMAX":
         $for N in range(BATCH_TILE):
           vy${ABC[N]} = ${MAX_F32}(vy${ABC[N]}, vy_min);
@@ -70,6 +75,8 @@
           const float va = *a++;
           const float vb = *b++;
           float vy = ${OP_FUNC("va", "vb")};
+          $if OP == "SQRDIFF":
+            vy = vy * vy;
           $if ACTIVATION == "MINMAX":
             vy = ${MAX_F32}(vy, vy_min);
             vy = ${MIN_F32}(vy, vy_max);
@@ -80,6 +87,8 @@
         const float va = *a;
         const float vb = *b;
         float vy = ${OP_FUNC("va", "vb")};
+        $if OP == "SQRDIFF":
+          vy = vy * vy;
         $if ACTIVATION == "MINMAX":
           vy = ${MAX_F32}(vy, vy_min);
           vy = ${MIN_F32}(vy, vy_max);
@@ -90,6 +99,8 @@
       const float va = *a++;
       const float vb = *b++;
       float vy = ${OP_FUNC("va", "vb")};
+      $if OP == "SQRDIFF":
+        vy = vy * vy;
       $if ACTIVATION == "MINMAX":
         vy = ${MAX_F32}(vy, vy_min);
         vy = ${MIN_F32}(vy, vy_max);
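Expanded for BATCH_TILE=1 with the LINEAR activation, the scalar template above yields essentially the whole kernel. A sketch (the params argument follows the template's PARAMS mapping and is unused on this path):

#include <assert.h>
#include <stddef.h>

// Sketch of the generated scalar x1 SQRDIFF kernel; n counts bytes.
void f32_vsqrdiff_scalar_x1_sketch(size_t n, const float* a, const float* b,
                                   float* y, const void* params)
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  for (; n >= sizeof(float); n -= sizeof(float)) {
    const float va = *a++;
    const float vb = *b++;
    float vy = va - vb;
    vy = vy * vy;  // the SQRDIFF-only squaring step
    *y++ = vy;
  }
}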
diff --git a/src/f32-vbinary/vop-sse.c.in b/src/f32-vbinary/vop-sse.c.in
index 3825e8d..1565ce7 100644
--- a/src/f32-vbinary/vop-sse.c.in
+++ b/src/f32-vbinary/vop-sse.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -24,6 +24,7 @@
 $  "MIN": lambda x, y: "_mm_min_ps(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "_mm_mul_ps(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "_mm_sub_ps(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "_mm_sub_ps(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -55,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 4):
       __m128 vy${ABC[N:N+4]} = ${_MM_OP_PS("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = _mm_max_ps(vy${ABC[N:N+4]}, vy_min);
@@ -76,6 +81,8 @@
       b += 4;
 
       __m128 vy0123 = ${_MM_OP_PS("va0123", "vb0123")};
+      $if OP == "SQRDIFF":
+        vy0123 = _mm_mul_ps(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = _mm_max_ps(vy0123, vy_min);
         vy0123 = _mm_min_ps(vy0123, vy_max);
@@ -87,6 +94,8 @@
     const __m128 vb0123 = _mm_loadu_ps(b);
 
     __m128 vy0123 = ${_MM_OP_PS("va0123", "vb0123")};
+    $if OP == "SQRDIFF":
+      vy0123 = _mm_mul_ps(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = _mm_max_ps(vy0123, vy_min);
       vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/vopc-avx.c.in b/src/f32-vbinary/vopc-avx.c.in
index 380b1b8..badc839 100644
--- a/src/f32-vbinary/vopc-avx.c.in
+++ b/src/f32-vbinary/vopc-avx.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -27,6 +27,8 @@
 $  "MUL": lambda x: "_mm256_mul_ps(%s, vb)" % x,
 $  "SUB": lambda x: "_mm256_sub_ps(%s, vb)" % x,
 $  "RSUB": lambda x: "_mm256_sub_ps(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "_mm256_sub_ps(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "_mm256_sub_ps(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -54,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 8):
       __m256 vy${ABC[N:N+8]} = ${_MM256_OP_PS("va" + ABC[N:N+8])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 8):
+        vy${ABC[N:N+8]} = _mm256_mul_ps(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 8):
         vy${ABC[N:N+8]} = _mm256_max_ps(vy${ABC[N:N+8]}, vy_min);
@@ -72,6 +78,8 @@
       a += 8;
 
       __m256 vy = ${_MM256_OP_PS("va")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy = _mm256_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm256_max_ps(vy, vy_min);
         vy = _mm256_min_ps(vy, vy_max);
@@ -86,6 +94,8 @@
     const __m256 va = _mm256_maskload_ps(a, vmask);
 
     __m256 vy = ${_MM256_OP_PS("va")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy = _mm256_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm256_max_ps(vy, vy_min);
       vy = _mm256_min_ps(vy, vy_max);
diff --git a/src/f32-vbinary/vopc-avx512f.c.in b/src/f32-vbinary/vopc-avx512f.c.in
index 6460ebe..0548296 100644
--- a/src/f32-vbinary/vopc-avx512f.c.in
+++ b/src/f32-vbinary/vopc-avx512f.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 16 == 0
 $assert BATCH_TILE >= 16
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -26,6 +26,8 @@
 $  "MUL": lambda x: "_mm512_mul_ps(%s, vb)" % x,
 $  "SUB": lambda x: "_mm512_sub_ps(%s, vb)" % x,
 $  "RSUB": lambda x: "_mm512_sub_ps(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "_mm512_sub_ps(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "_mm512_sub_ps(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -53,6 +55,10 @@
     $for N in range(0, BATCH_TILE, 16):
       __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 16):
+        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vy${ABC[N:N+16]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 16):
         vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);
@@ -71,6 +77,8 @@
       a += 16;
 
       __m512 vy = ${_MM512_OP_PS("va")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy = _mm512_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm512_max_ps(vy, vy_min);
         vy = _mm512_min_ps(vy, vy_max);
@@ -87,6 +95,8 @@
     const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
 
     __m512 vy = ${_MM512_OP_PS("va")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy = _mm512_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm512_max_ps(vy, vy_min);
       vy = _mm512_min_ps(vy, vy_max);
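Worth noting in the AVX-512 template: the 1..15-element tail uses a mask register instead of AVX's maskload table. A sketch of that remainder path, assuming the usual XNNPACK mask idiom (mask built from the leftover byte count):

#include <immintrin.h>
#include <stdint.h>
#include <stddef.h>

// Sketch: final 1..15 floats of a SQRDIFF kernel via AVX-512 masking (n in bytes).
static void f32_vsqrdiff_avx512f_tail_sketch(size_t n, const float* a,
                                             const float* b, float* y)
{
  // (1 << live_elements) - 1 selects the lanes that remain.
  const __mmask16 vmask =
      _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (n / sizeof(float))) - 1));
  const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
  const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
  __m512 vy = _mm512_sub_ps(va, vb);
  vy = _mm512_mul_ps(vy, vy);
  _mm512_mask_storeu_ps(y, vmask, vy);
}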
diff --git a/src/f32-vbinary/vopc-neon.c.in b/src/f32-vbinary/vopc-neon.c.in
index de94db7..e9b6893 100644
--- a/src/f32-vbinary/vopc-neon.c.in
+++ b/src/f32-vbinary/vopc-neon.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "vmulq_f32(%s, vb)" % x,
 $  "SUB": lambda x: "vsubq_f32(%s, vb)" % x,
 $  "RSUB": lambda x: "vsubq_f32(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "vsubq_f32(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "vsubq_f32(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -50,6 +52,10 @@
     $for N in range(0, BATCH_TILE, 4):
       float32x4_t vy${ABC[N:N+4]} = ${VOPQ_F32("va" + ABC[N:N+4])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = vmulq_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = vmaxq_f32(vy${ABC[N:N+4]}, vy_min);
@@ -65,6 +71,8 @@
       const float32x4_t va0123 = vld1q_f32(a); a += 4;
 
       float32x4_t vy0123 = ${VOPQ_F32("va0123")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy0123 = vmulq_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = vmaxq_f32(vy0123, vy_min);
         vy0123 = vminq_f32(vy0123, vy_max);
@@ -74,6 +82,8 @@
     const float32x4_t va0123 = vld1q_f32(a);
 
     float32x4_t vy0123 = ${VOPQ_F32("va0123")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy0123 = vmulq_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = vmaxq_f32(vy0123, vy_min);
       vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/vopc-psimd.c.in b/src/f32-vbinary/vopc-psimd.c.in
index 45877e4..e60a492 100644
--- a/src/f32-vbinary/vopc-psimd.c.in
+++ b/src/f32-vbinary/vopc-psimd.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "psimd_mul_f32(%s, vb)" % x,
 $  "SUB": lambda x: "psimd_sub_f32(%s, vb)" % x,
 $  "RSUB": lambda x: "psimd_sub_f32(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "psimd_sub_f32(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "psimd_sub_f32(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -52,6 +54,10 @@
     $for N in range(0, BATCH_TILE, 4):
       psimd_f32 vy${ABC[N:N+4]} = ${PSIMD_OP_F32("va" + ABC[N:N+4])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = psimd_mul_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = psimd_max_f32(vy${ABC[N:N+4]}, vy_min);
@@ -70,6 +76,8 @@
       a += 4;
 
       psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy0123 = psimd_mul_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = psimd_max_f32(vy0123, vy_min);
         vy0123 = psimd_min_f32(vy0123, vy_max);
@@ -80,6 +88,8 @@
     const psimd_f32 va0123 = psimd_load_f32(a);
 
     psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy0123 = psimd_mul_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = psimd_max_f32(vy0123, vy_min);
       vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/vopc-scalar.c.in b/src/f32-vbinary/vopc-scalar.c.in
index cf65499..c39347f 100644
--- a/src/f32-vbinary/vopc-scalar.c.in
+++ b/src/f32-vbinary/vopc-scalar.c.in
@@ -5,7 +5,7 @@
 
 $assert BATCH_TILE >= 1
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "%s * vb" % x,
 $  "SUB": lambda x: "%s - vb" % x,
 $  "RSUB": lambda x: "vb - %s" % x,
+$  "SQRDIFF": lambda x: "%s - vb" % x,
+$  "RSQRDIFF": lambda x: "vb - %s" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -52,6 +54,10 @@
       $for N in range(BATCH_TILE):
         float vy${ABC[N]} = ${OP_FUNC("va" + ABC[N])};
 
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        $for N in range(BATCH_TILE):
+          vy${ABC[N]} = vy${ABC[N]} * vy${ABC[N]};
+
       $if ACTIVATION == "MINMAX":
         $for N in range(BATCH_TILE):
           vy${ABC[N]} = ${MAX_F32}(vy${ABC[N]}, vy_min);
@@ -68,6 +74,8 @@
         do {
           const float va = *a++;
           float vy = ${OP_FUNC("va")};
+          $if OP in ["SQRDIFF", "RSQRDIFF"]:
+            vy = vy * vy;
           $if ACTIVATION == "MINMAX":
             vy = ${MAX_F32}(vy, vy_min);
             vy = ${MIN_F32}(vy, vy_max);
@@ -77,6 +85,8 @@
       $else:
         const float va = *a;
         float vy = ${OP_FUNC("va")};
+        $if OP in ["SQRDIFF", "RSQRDIFF"]:
+          vy = vy * vy;
         $if ACTIVATION == "MINMAX":
           vy = ${MAX_F32}(vy, vy_min);
           vy = ${MIN_F32}(vy, vy_max);
@@ -86,6 +96,8 @@
     for (; n >= sizeof(float); n -= sizeof(float)) {
       const float va = *a++;
       float vy = ${OP_FUNC("va")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy = vy * vy;
       $if ACTIVATION == "MINMAX":
         vy = ${MAX_F32}(vy, vy_min);
         vy = ${MIN_F32}(vy, vy_max);
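The vopc variants broadcast one operand, and SQRDIFF/RSQRDIFF swap the subtraction's operand order exactly like SUB/RSUB. Unlike SUB/RSUB, though, the two produce identical outputs: IEEE 754 negation is exact, so squaring (va - vb) and (vb - va) gives the same value, and RSQRDIFF exists for table symmetry rather than numerics. A sketch of the scalar x1 broadcast kernel (hypothetical name; params unused on the LINEAR path):

#include <assert.h>
#include <stddef.h>

// Sketch of a scalar SQRDIFFC kernel: b points to a single broadcast value.
void f32_vsqrdiffc_scalar_x1_sketch(size_t n, const float* a, const float* b,
                                    float* y, const void* params)
{
  assert(n % sizeof(float) == 0);
  const float vb = *b;
  for (; n >= sizeof(float); n -= sizeof(float)) {
    const float va = *a++;
    float vy = va - vb;  // RSQRDIFF computes vb - va; the square is the same
    vy = vy * vy;
    *y++ = vy;
  }
}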
diff --git a/src/f32-vbinary/vopc-sse.c.in b/src/f32-vbinary/vopc-sse.c.in
index 99d93c7..c4ba0bf 100644
--- a/src/f32-vbinary/vopc-sse.c.in
+++ b/src/f32-vbinary/vopc-sse.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -26,6 +26,8 @@
 $  "MUL": lambda x: "_mm_mul_ps(%s, vb)" % x,
 $  "SUB": lambda x: "_mm_sub_ps(%s, vb)" % x,
 $  "RSUB": lambda x: "_mm_sub_ps(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "_mm_sub_ps(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "_mm_sub_ps(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -53,6 +55,10 @@
     $for N in range(0, BATCH_TILE, 4):
       __m128 vy${ABC[N:N+4]} = ${_MM_OP_PS("va" + ABC[N:N+4])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = _mm_max_ps(vy${ABC[N:N+4]}, vy_min);
@@ -71,6 +77,8 @@
       a += 4;
 
       __m128 vy0123 = ${_MM_OP_PS("va0123")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy0123 = _mm_mul_ps(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = _mm_max_ps(vy0123, vy_min);
         vy0123 = _mm_min_ps(vy0123, vy_max);
@@ -81,6 +89,8 @@
     const __m128 va0123 = _mm_loadu_ps(a);
 
     __m128 vy0123 = ${_MM_OP_PS("va0123")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy0123 = _mm_mul_ps(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = _mm_max_ps(vy0123, vy_min);
       vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/xnnpack/vbinary.h b/src/xnnpack/vbinary.h
index de28a13..719fa64 100644
--- a/src/xnnpack/vbinary.h
+++ b/src/xnnpack/vbinary.h
@@ -113,6 +113,23 @@
 DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vmin_ukernel__scalar_x2)
 DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vmin_ukernel__scalar_x4)
 
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__neon_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__neon_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__sse_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__sse_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__avx_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__avx_x16)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__avx512f_x16)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__avx512f_x32)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__psimd_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__psimd_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__wasm_x1)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__wasm_x2)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__wasm_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__scalar_x1)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__scalar_x2)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiff_ukernel__scalar_x4)
+
 DECLARE_F32_VBINOP_MINMAX_UKERNEL_FUNCTION(xnn_f32_vadd_minmax_ukernel__neon_x4)
 DECLARE_F32_VBINOP_MINMAX_UKERNEL_FUNCTION(xnn_f32_vadd_minmax_ukernel__neon_x8)
 DECLARE_F32_VBINOP_MINMAX_UKERNEL_FUNCTION(xnn_f32_vadd_minmax_ukernel__sse_x4)
@@ -215,6 +232,40 @@
 DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vminc_ukernel__scalar_x2)
 DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vminc_ukernel__scalar_x4)
 
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__neon_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__neon_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__psimd_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__psimd_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__sse_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__sse_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__avx_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__avx_x16)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__avx512f_x16)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__avx512f_x32)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__wasm_x1)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__wasm_x2)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__wasm_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__scalar_x1)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__scalar_x2)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vsqrdiffc_ukernel__scalar_x4)
+
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__neon_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__neon_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__psimd_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__psimd_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__sse_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__sse_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__avx_x8)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__avx_x16)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__avx512f_x16)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__avx512f_x32)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__wasm_x1)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__wasm_x2)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__wasm_x4)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__scalar_x1)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__scalar_x2)
+DECLARE_F32_VBINOP_UKERNEL_FUNCTION(xnn_f32_vrsqrdiffc_ukernel__scalar_x4)
+
 DECLARE_F32_VBINOP_MINMAX_UKERNEL_FUNCTION(xnn_f32_vaddc_minmax_ukernel__neon_x4)
 DECLARE_F32_VBINOP_MINMAX_UKERNEL_FUNCTION(xnn_f32_vaddc_minmax_ukernel__neon_x8)
 DECLARE_F32_VBINOP_MINMAX_UKERNEL_FUNCTION(xnn_f32_vaddc_minmax_ukernel__sse_x4)
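All the SQRDIFF declarations above use the plain (non-minmax) DECLARE_F32_VBINOP_UKERNEL_FUNCTION form. A hedged usage sketch; the byte-count convention follows the templates, but the precise params type is an assumption, and LINEAR-activation kernels do not read it:

#include <stddef.h>
#include <xnnpack/vbinary.h>

// Sketch: run the scalar SQRDIFF microkernel over 7 floats.
// Passing NULL for params assumes the LINEAR path never dereferences it.
void sqrdiff_example(const float a[7], const float b[7], float y[7])
{
  xnn_f32_vsqrdiff_ukernel__scalar_x1(7 * sizeof(float), a, b, y, NULL);
}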
diff --git a/test/f16-vmax.cc b/test/f16-vmax.cc
index 8ccd530..c20c192 100644
--- a/test/f16-vmax.cc
+++ b/test/f16-vmax.cc
@@ -82,26 +82,6 @@
         .Test(xnn_f16_vmax_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F16_VMAX__NEONFP16ARITH_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vmax_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F16_VMAX__NEONFP16ARITH_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vmax_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
 
 
@@ -170,24 +150,4 @@
         .Test(xnn_f16_vmax_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F16_VMAX__NEONFP16ARITH_X16, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vmax_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F16_VMAX__NEONFP16ARITH_X16, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vmax_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
diff --git a/test/f16-vmaxc.cc b/test/f16-vmaxc.cc
index 03f7be7..32be142 100644
--- a/test/f16-vmaxc.cc
+++ b/test/f16-vmaxc.cc
@@ -61,26 +61,6 @@
         .Test(xnn_f16_vmaxc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F16_VMAXC__NEONFP16ARITH_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vmaxc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F16_VMAXC__NEONFP16ARITH_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vmaxc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
 
 
@@ -128,24 +108,4 @@
         .Test(xnn_f16_vmaxc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F16_VMAXC__NEONFP16ARITH_X16, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vmaxc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F16_VMAXC__NEONFP16ARITH_X16, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vmaxc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
diff --git a/test/f16-vmin.cc b/test/f16-vmin.cc
index c274058..55fc1db 100644
--- a/test/f16-vmin.cc
+++ b/test/f16-vmin.cc
@@ -82,26 +82,6 @@
         .Test(xnn_f16_vmin_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F16_VMIN__NEONFP16ARITH_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vmin_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F16_VMIN__NEONFP16ARITH_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vmin_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
 
 
@@ -170,24 +150,4 @@
         .Test(xnn_f16_vmin_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F16_VMIN__NEONFP16ARITH_X16, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vmin_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F16_VMIN__NEONFP16ARITH_X16, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vmin_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
diff --git a/test/f16-vminc.cc b/test/f16-vminc.cc
index cad565e..4bf7fd1 100644
--- a/test/f16-vminc.cc
+++ b/test/f16-vminc.cc
@@ -61,26 +61,6 @@
         .Test(xnn_f16_vminc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F16_VMINC__NEONFP16ARITH_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vminc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F16_VMINC__NEONFP16ARITH_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vminc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
 
 
@@ -128,24 +108,4 @@
         .Test(xnn_f16_vminc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F16_VMINC__NEONFP16ARITH_X16, qmin) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f16_vminc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F16_VMINC__NEONFP16ARITH_X16, qmax) {
-    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f16_vminc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_ARM64
diff --git a/test/f16-vrsqrdiffc.cc b/test/f16-vrsqrdiffc.cc
new file mode 100644
index 0000000..3e3e4e6
--- /dev/null
+++ b/test/f16-vrsqrdiffc.cc
@@ -0,0 +1,111 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f16-vrsqrdiffc.yaml
+//   Generator: tools/generate-vbinary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vbinary.h>
+#include "vbinaryc-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM64
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM64
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    VBinOpCMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F16_VRSQRDIFFC__NEONFP16ARITH_X16, inplace) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM64
diff --git a/test/f16-vrsqrdiffc.yaml b/test/f16-vrsqrdiffc.yaml
new file mode 100644
index 0000000..809a974
--- /dev/null
+++ b/test/f16-vrsqrdiffc.yaml
@@ -0,0 +1,10 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x8
+  arch:
+    - aarch64
+- name: xnn_f16_vrsqrdiffc_ukernel__neonfp16arith_x16
+  arch:
+    - aarch64
diff --git a/test/f16-vsqrdiff.cc b/test/f16-vsqrdiff.cc
new file mode 100644
index 0000000..258d727
--- /dev/null
+++ b/test/f16-vsqrdiff.cc
@@ -0,0 +1,153 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f16-vsqrdiff.yaml
+//   Generator: tools/generate-vbinary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vbinary.h>
+#include "vbinary-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM64
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    VBinOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, inplace_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, inplace_b) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X8, inplace_a_and_b) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM64
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    VBinOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, inplace_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, inplace_b) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F16_VSQRDIFF__NEONFP16ARITH_X16, inplace_a_and_b) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_ARM64
diff --git a/test/f16-vsqrdiff.yaml b/test/f16-vsqrdiff.yaml
new file mode 100644
index 0000000..68c26c4
--- /dev/null
+++ b/test/f16-vsqrdiff.yaml
@@ -0,0 +1,10 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8
+  arch:
+    - aarch64
+- name: xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16
+  arch:
+    - aarch64
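The f16 kernels exercised by these new tests are outside this diff, but they mirror the f32 NEON template with half-precision intrinsics. A plausible sketch of the neonfp16arith x8 main loop (requires the ARMv8.2 FP16 arithmetic extension; tail handling and params elided):

#include <arm_neon.h>
#include <stddef.h>

// Sketch only: needs __ARM_FEATURE_FP16_VECTOR_ARITHMETIC; n counts bytes.
void f16_vsqrdiff_neonfp16arith_x8_sketch(size_t n, const float16_t* a,
                                          const float16_t* b, float16_t* y)
{
  for (; n >= 8 * sizeof(float16_t); n -= 8 * sizeof(float16_t)) {
    const float16x8_t va = vld1q_f16(a); a += 8;
    const float16x8_t vb = vld1q_f16(b); b += 8;
    float16x8_t vy = vsubq_f16(va, vb);
    vy = vmulq_f16(vy, vy);
    vst1q_f16(y, vy); y += 8;
  }
}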
diff --git a/test/f16-vsqrdiffc.cc b/test/f16-vsqrdiffc.cc
new file mode 100644
index 0000000..665afd2
--- /dev/null
+++ b/test/f16-vsqrdiffc.cc
@@ -0,0 +1,111 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f16-vsqrdiffc.yaml
+//   Generator: tools/generate-vbinary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vbinary.h>
+#include "vbinaryc-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM64
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM64
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    VBinOpCMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F16_VSQRDIFFC__NEONFP16ARITH_X16, inplace) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM64
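
The generated sweeps partition the edge cases systematically. For the x16
kernel above:

    // batch_eq_16 : batch_size == 16       (exactly one full vector pass)
    // batch_div_16: 32, 48, ..., 144       (multiple full passes, no remainder)
    // batch_lt_16 : 1..15                  (remainder-only path)
    // batch_gt_16 : 17..31                 (one full pass plus a remainder)
    // inplace     : 1, 16, 31, 46, 61, 76  (output aliases an input)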
diff --git a/test/f16-vsqrdiffc.yaml b/test/f16-vsqrdiffc.yaml
new file mode 100644
index 0000000..7c74359
--- /dev/null
+++ b/test/f16-vsqrdiffc.yaml
@@ -0,0 +1,10 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8
+  arch:
+    - aarch64
+- name: xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16
+  arch:
+    - aarch64
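
The C-suffixed (vsqrdiffc) kernels broadcast one scalar operand across the
whole batch rather than reading a second vector. A scalar sketch of the
expected semantics (hypothetical names, not the kernel signature):

    #include <stddef.h>

    // Reference: out[i] = (a[i] - c)^2 for a scalar c broadcast over the batch.
    static void sqrdiffc_reference(size_t n, const float* a, float c, float* out) {
      for (size_t i = 0; i < n; i++) {
        const float diff = a[i] - c;
        out[i] = diff * diff;
      }
    }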
diff --git a/test/f32-vmax.cc b/test/f32-vmax.cc
index 6ffe4fe..8c8c660 100644
--- a/test/f32-vmax.cc
+++ b/test/f32-vmax.cc
@@ -82,26 +82,6 @@
         .Test(xnn_f32_vmax_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__NEON_X4, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__NEON_X4, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -170,26 +150,6 @@
         .Test(xnn_f32_vmax_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__NEON_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__NEON_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -258,26 +218,6 @@
         .Test(xnn_f32_vmax_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__SSE_X4, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__SSE_X4, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -346,26 +286,6 @@
         .Test(xnn_f32_vmax_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__SSE_X8, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__SSE_X8, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -434,26 +354,6 @@
         .Test(xnn_f32_vmax_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__AVX_X8, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__AVX_X8, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -522,26 +422,6 @@
         .Test(xnn_f32_vmax_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__AVX_X16, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__AVX_X16, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -610,26 +490,6 @@
         .Test(xnn_f32_vmax_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__AVX512F_X16, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__AVX512F_X16, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -698,26 +558,6 @@
         .Test(xnn_f32_vmax_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::Max);
     }
   }
-
-  TEST(F32_VMAX__AVX512F_X32, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
-
-  TEST(F32_VMAX__AVX512F_X32, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::Max);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -786,26 +626,6 @@
         .Test(xnn_f32_vmax_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAX__PSIMD_X4, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAX__PSIMD_X4, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -874,26 +694,6 @@
         .Test(xnn_f32_vmax_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAX__PSIMD_X8, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAX__PSIMD_X8, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -939,24 +739,6 @@
         .Test(xnn_f32_vmax_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAX__WASM_X1, qmin) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAX__WASM_X1, qmax) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -1018,24 +800,6 @@
         .Test(xnn_f32_vmax_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAX__WASM_X2, qmin) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAX__WASM_X2, qmax) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -1097,24 +861,6 @@
         .Test(xnn_f32_vmax_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAX__WASM_X4, qmin) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmax_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAX__WASM_X4, qmax) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmax_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -1160,23 +906,6 @@
   }
 }
 
-TEST(F32_VMAX__SCALAR_X1, qmin) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmax_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMAX__SCALAR_X1, qmax) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmax_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMAX__SCALAR_X2, batch_eq_2) {
   VBinOpMicrokernelTester()
@@ -1236,23 +965,6 @@
   }
 }
 
-TEST(F32_VMAX__SCALAR_X2, qmin) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmax_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMAX__SCALAR_X2, qmax) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmax_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMAX__SCALAR_X4, batch_eq_4) {
   VBinOpMicrokernelTester()
@@ -1311,21 +1023,3 @@
       .Test(xnn_f32_vmax_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
   }
 }
-
-TEST(F32_VMAX__SCALAR_X4, qmin) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmax_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMAX__SCALAR_X4, qmax) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmax_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::Max, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
\ No newline at end of file
diff --git a/test/f32-vmaxc.cc b/test/f32-vmaxc.cc
index 89de220..cd62037 100644
--- a/test/f32-vmaxc.cc
+++ b/test/f32-vmaxc.cc
@@ -61,26 +61,6 @@
         .Test(xnn_f32_vmaxc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__NEON_X4, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__NEON_X4, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -128,26 +108,6 @@
         .Test(xnn_f32_vmaxc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__NEON_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__NEON_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -195,26 +155,6 @@
         .Test(xnn_f32_vmaxc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__SSE_X4, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__SSE_X4, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -262,26 +202,6 @@
         .Test(xnn_f32_vmaxc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__SSE_X8, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__SSE_X8, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -329,26 +249,6 @@
         .Test(xnn_f32_vmaxc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__AVX_X8, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__AVX_X8, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -396,26 +296,6 @@
         .Test(xnn_f32_vmaxc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__AVX_X16, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__AVX_X16, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -463,26 +343,6 @@
         .Test(xnn_f32_vmaxc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__AVX512F_X16, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__AVX512F_X16, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -530,26 +390,6 @@
         .Test(xnn_f32_vmaxc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::MaxC);
     }
   }
-
-  TEST(F32_VMAXC__AVX512F_X32, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
-
-  TEST(F32_VMAXC__AVX512F_X32, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::MaxC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -597,26 +437,6 @@
         .Test(xnn_f32_vmaxc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAXC__PSIMD_X4, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAXC__PSIMD_X4, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -664,26 +484,6 @@
         .Test(xnn_f32_vmaxc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAXC__PSIMD_X8, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAXC__PSIMD_X8, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -710,24 +510,6 @@
         .Test(xnn_f32_vmaxc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAXC__WASM_X1, qmin) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAXC__WASM_X1, qmax) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -770,24 +552,6 @@
         .Test(xnn_f32_vmaxc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAXC__WASM_X2, qmin) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAXC__WASM_X2, qmax) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -830,24 +594,6 @@
         .Test(xnn_f32_vmaxc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMAXC__WASM_X4, qmin) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmaxc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMAXC__WASM_X4, qmax) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmaxc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -874,23 +620,6 @@
   }
 }
 
-TEST(F32_VMAXC__SCALAR_X1, qmin) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmaxc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMAXC__SCALAR_X1, qmax) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmaxc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMAXC__SCALAR_X2, batch_eq_2) {
   VBinOpCMicrokernelTester()
@@ -931,23 +660,6 @@
   }
 }
 
-TEST(F32_VMAXC__SCALAR_X2, qmin) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmaxc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMAXC__SCALAR_X2, qmax) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmaxc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMAXC__SCALAR_X4, batch_eq_4) {
   VBinOpCMicrokernelTester()
@@ -987,21 +699,3 @@
       .Test(xnn_f32_vmaxc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
   }
 }
-
-TEST(F32_VMAXC__SCALAR_X4, qmin) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmaxc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMAXC__SCALAR_X4, qmax) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmaxc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::MaxC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
\ No newline at end of file
diff --git a/test/f32-vmin.cc b/test/f32-vmin.cc
index 64fc63c..ef6a39b 100644
--- a/test/f32-vmin.cc
+++ b/test/f32-vmin.cc
@@ -82,26 +82,6 @@
         .Test(xnn_f32_vmin_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__NEON_X4, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__NEON_X4, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -170,26 +150,6 @@
         .Test(xnn_f32_vmin_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__NEON_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__NEON_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -258,26 +218,6 @@
         .Test(xnn_f32_vmin_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__SSE_X4, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__SSE_X4, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -346,26 +286,6 @@
         .Test(xnn_f32_vmin_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__SSE_X8, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__SSE_X8, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -434,26 +354,6 @@
         .Test(xnn_f32_vmin_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__AVX_X8, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__AVX_X8, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -522,26 +422,6 @@
         .Test(xnn_f32_vmin_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__AVX_X16, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__AVX_X16, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -610,26 +490,6 @@
         .Test(xnn_f32_vmin_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__AVX512F_X16, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__AVX512F_X16, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -698,26 +558,6 @@
         .Test(xnn_f32_vmin_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::Min);
     }
   }
-
-  TEST(F32_VMIN__AVX512F_X32, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
-
-  TEST(F32_VMIN__AVX512F_X32, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::Min);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -786,26 +626,6 @@
         .Test(xnn_f32_vmin_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMIN__PSIMD_X4, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMIN__PSIMD_X4, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -874,26 +694,6 @@
         .Test(xnn_f32_vmin_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMIN__PSIMD_X8, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMIN__PSIMD_X8, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -939,24 +739,6 @@
         .Test(xnn_f32_vmin_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMIN__WASM_X1, qmin) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMIN__WASM_X1, qmax) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -1018,24 +800,6 @@
         .Test(xnn_f32_vmin_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMIN__WASM_X2, qmin) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMIN__WASM_X2, qmax) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -1097,24 +861,6 @@
         .Test(xnn_f32_vmin_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMIN__WASM_X4, qmin) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vmin_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMIN__WASM_X4, qmax) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vmin_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -1160,23 +906,6 @@
   }
 }
 
-TEST(F32_VMIN__SCALAR_X1, qmin) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmin_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMIN__SCALAR_X1, qmax) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmin_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMIN__SCALAR_X2, batch_eq_2) {
   VBinOpMicrokernelTester()
@@ -1236,23 +965,6 @@
   }
 }
 
-TEST(F32_VMIN__SCALAR_X2, qmin) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmin_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMIN__SCALAR_X2, qmax) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmin_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMIN__SCALAR_X4, batch_eq_4) {
   VBinOpMicrokernelTester()
@@ -1311,21 +1023,3 @@
       .Test(xnn_f32_vmin_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
   }
 }
-
-TEST(F32_VMIN__SCALAR_X4, qmin) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vmin_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMIN__SCALAR_X4, qmax) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vmin_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::Min, VBinOpMicrokernelTester::Variant::Scalar);
-  }
-}
\ No newline at end of file
diff --git a/test/f32-vminc.cc b/test/f32-vminc.cc
index 6643e6b..b2c3713 100644
--- a/test/f32-vminc.cc
+++ b/test/f32-vminc.cc
@@ -61,26 +61,6 @@
         .Test(xnn_f32_vminc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__NEON_X4, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__NEON_X4, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -128,26 +108,6 @@
         .Test(xnn_f32_vminc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__NEON_X8, qmin) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__NEON_X8, qmax) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
@@ -195,26 +155,6 @@
         .Test(xnn_f32_vminc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__SSE_X4, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__SSE_X4, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -262,26 +202,6 @@
         .Test(xnn_f32_vminc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__SSE_X8, qmin) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__SSE_X8, qmax) {
-    TEST_REQUIRES_X86_SSE;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -329,26 +249,6 @@
         .Test(xnn_f32_vminc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__AVX_X8, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__AVX_X8, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -396,26 +296,6 @@
         .Test(xnn_f32_vminc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__AVX_X16, qmin) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__AVX_X16, qmax) {
-    TEST_REQUIRES_X86_AVX;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -463,26 +343,6 @@
         .Test(xnn_f32_vminc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__AVX512F_X16, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__AVX512F_X16, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -530,26 +390,6 @@
         .Test(xnn_f32_vminc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::MinC);
     }
   }
-
-  TEST(F32_VMINC__AVX512F_X32, qmin) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
-
-  TEST(F32_VMINC__AVX512F_X32, qmax) {
-    TEST_REQUIRES_X86_AVX512F;
-    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::MinC);
-    }
-  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 
@@ -597,26 +437,6 @@
         .Test(xnn_f32_vminc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMINC__PSIMD_X4, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMINC__PSIMD_X4, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -664,26 +484,6 @@
         .Test(xnn_f32_vminc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMINC__PSIMD_X8, qmin) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMINC__PSIMD_X8, qmax) {
-    TEST_REQUIRES_PSIMD;
-    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
 
 
@@ -710,24 +510,6 @@
         .Test(xnn_f32_vminc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMINC__WASM_X1, qmin) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMINC__WASM_X1, qmax) {
-    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -770,24 +552,6 @@
         .Test(xnn_f32_vminc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMINC__WASM_X2, qmin) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMINC__WASM_X2, qmax) {
-    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -830,24 +594,6 @@
         .Test(xnn_f32_vminc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
     }
   }
-
-  TEST(F32_VMINC__WASM_X4, qmin) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmin(128)
-        .Test(xnn_f32_vminc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
-
-  TEST(F32_VMINC__WASM_X4, qmax) {
-    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-      VBinOpCMicrokernelTester()
-        .batch_size(batch_size)
-        .qmax(128)
-        .Test(xnn_f32_vminc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-    }
-  }
 #endif  // XNN_ARCH_WASM
 
 
@@ -874,23 +620,6 @@
   }
 }
 
-TEST(F32_VMINC__SCALAR_X1, qmin) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vminc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMINC__SCALAR_X1, qmax) {
-  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vminc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMINC__SCALAR_X2, batch_eq_2) {
   VBinOpCMicrokernelTester()
@@ -931,23 +660,6 @@
   }
 }
 
-TEST(F32_VMINC__SCALAR_X2, qmin) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vminc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMINC__SCALAR_X2, qmax) {
-  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vminc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
 
 TEST(F32_VMINC__SCALAR_X4, batch_eq_4) {
   VBinOpCMicrokernelTester()
@@ -987,21 +699,3 @@
       .Test(xnn_f32_vminc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
   }
 }
-
-TEST(F32_VMINC__SCALAR_X4, qmin) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(xnn_f32_vminc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
-
-TEST(F32_VMINC__SCALAR_X4, qmax) {
-  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
-    VBinOpCMicrokernelTester()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(xnn_f32_vminc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::MinC, VBinOpCMicrokernelTester::Variant::Scalar);
-  }
-}
\ No newline at end of file
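Note: the generated tests in the new files below all follow a single coverage pattern. A minimal sketch of that pattern, inferred from the emitted cases (the generator itself, tools/generate-vbinary-test.py, is not part of this diff); BatchSizesForTile is a hypothetical helper for illustration, not an XNNPACK symbol:

    // For a microkernel that processes T elements per tile (T > 1), the
    // generated tests sweep batch_size over these ranges:
    //   batch_eq_T:  exactly T
    //   batch_div_T: 2T, 3T, ..., 9T   (whole multiples of the tile)
    //   batch_lt_T:  1 .. T-1          (remainder-only path)
    //   batch_gt_T:  T+1 .. 2T-1       (one full tile plus a remainder)
    //   inplace:     1 .. 5T, step max(T-1, 1)  (output aliases an input)
    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> BatchSizesForTile(std::size_t t) {
      std::vector<std::size_t> sizes;
      sizes.push_back(t);                                                  // batch_eq_T
      for (std::size_t n = 2 * t; n < 10 * t; n += t) sizes.push_back(n);  // batch_div_T
      for (std::size_t n = 1; n < t; n++) sizes.push_back(n);              // batch_lt_T
      for (std::size_t n = t + 1; n < 2 * t; n++) sizes.push_back(n);      // batch_gt_T
      const std::size_t step = (t > 1) ? t - 1 : 1;
      for (std::size_t n = 1; n <= 5 * t; n += step) sizes.push_back(n);   // inplace
      return sizes;
    }

For the x1 scalar/wasm kernels the div/lt cases degenerate and the generator instead emits a single batch_gt_1 sweep over 2..9, as visible in the tests below.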
diff --git a/test/f32-vrsqrdiffc.cc b/test/f32-vrsqrdiffc.cc
new file mode 100644
index 0000000..5016bcf
--- /dev/null
+++ b/test/f32-vrsqrdiffc.cc
@@ -0,0 +1,701 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f32-vrsqrdiffc.yaml
+//   Generator: tools/generate-vbinary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vbinary.h>
+#include "vbinaryc-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VRSQRDIFFC__NEON_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON;
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X4, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VRSQRDIFFC__NEON_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__NEON_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VRSQRDIFFC__SSE_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X4, inplace) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VRSQRDIFFC__SSE_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__SSE_X8, inplace) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VRSQRDIFFC__AVX_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X8, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VRSQRDIFFC__AVX_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX;
+    VBinOpCMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX_X16, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VRSQRDIFFC__AVX512F_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    VBinOpCMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X16, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VRSQRDIFFC__AVX512F_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    VBinOpCMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__AVX512F_X32, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::RSqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+  TEST(F32_VRSQRDIFFC__PSIMD_X4, batch_eq_4) {
+    TEST_REQUIRES_PSIMD;
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X4, batch_div_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X4, batch_lt_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X4, batch_gt_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X4, inplace) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+
+
+#if !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+  TEST(F32_VRSQRDIFFC__PSIMD_X8, batch_eq_8) {
+    TEST_REQUIRES_PSIMD;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X8, batch_div_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X8, batch_lt_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X8, batch_gt_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__PSIMD_X8, inplace) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VRSQRDIFFC__WASM_X1, batch_eq_1) {
+    VBinOpCMicrokernelTester()
+      .batch_size(1)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X1, batch_gt_1) {
+    for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X1, inplace) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VRSQRDIFFC__WASM_X2, batch_eq_2) {
+    VBinOpCMicrokernelTester()
+      .batch_size(2)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X2, batch_div_2) {
+    for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X2, batch_lt_2) {
+    for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X2, batch_gt_2) {
+    for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X2, inplace) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VRSQRDIFFC__WASM_X4, batch_eq_4) {
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VRSQRDIFFC__WASM_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vrsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+TEST(F32_VRSQRDIFFC__SCALAR_X1, batch_eq_1) {
+  VBinOpCMicrokernelTester()
+    .batch_size(1)
+    .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X1, batch_gt_1) {
+  for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X1, inplace) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+
+TEST(F32_VRSQRDIFFC__SCALAR_X2, batch_eq_2) {
+  VBinOpCMicrokernelTester()
+    .batch_size(2)
+    .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X2, batch_div_2) {
+  for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X2, batch_lt_2) {
+  for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X2, batch_gt_2) {
+  for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X2, inplace) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+
+TEST(F32_VRSQRDIFFC__SCALAR_X4, batch_eq_4) {
+  VBinOpCMicrokernelTester()
+    .batch_size(4)
+    .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X4, batch_div_4) {
+  for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X4, batch_lt_4) {
+  for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X4, batch_gt_4) {
+  for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VRSQRDIFFC__SCALAR_X4, inplace) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vrsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::RSqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
diff --git a/test/f32-vrsqrdiffc.yaml b/test/f32-vrsqrdiffc.yaml
new file mode 100644
index 0000000..dc377ce
--- /dev/null
+++ b/test/f32-vrsqrdiffc.yaml
@@ -0,0 +1,20 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_f32_vrsqrdiffc_ukernel__neon_x4
+- name: xnn_f32_vrsqrdiffc_ukernel__neon_x8
+- name: xnn_f32_vrsqrdiffc_ukernel__sse_x4
+- name: xnn_f32_vrsqrdiffc_ukernel__sse_x8
+- name: xnn_f32_vrsqrdiffc_ukernel__avx_x8
+- name: xnn_f32_vrsqrdiffc_ukernel__avx_x16
+- name: xnn_f32_vrsqrdiffc_ukernel__avx512f_x16
+- name: xnn_f32_vrsqrdiffc_ukernel__avx512f_x32
+- name: xnn_f32_vrsqrdiffc_ukernel__psimd_x4
+- name: xnn_f32_vrsqrdiffc_ukernel__psimd_x8
+- name: xnn_f32_vrsqrdiffc_ukernel__wasm_x1
+- name: xnn_f32_vrsqrdiffc_ukernel__wasm_x2
+- name: xnn_f32_vrsqrdiffc_ukernel__wasm_x4
+- name: xnn_f32_vrsqrdiffc_ukernel__scalar_x1
+- name: xnn_f32_vrsqrdiffc_ukernel__scalar_x2
+- name: xnn_f32_vrsqrdiffc_ukernel__scalar_x4
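Note: a minimal sketch of the reference semantics these tests exercise, stated here for readability; ref_sqrdiff and ref_sqrdiffc are hypothetical names for illustration, not XNNPACK symbols:

    #include <stddef.h>

    // SqrDiff: out[i] = (a[i] - b[i])^2, element-wise over two vectors.
    static void ref_sqrdiff(size_t n, const float* a, const float* b, float* out) {
      for (size_t i = 0; i < n; i++) {
        const float d = a[i] - b[i];
        out[i] = d * d;
      }
    }

    // SqrDiffC / RSqrDiffC: the second operand is a broadcast scalar c.
    // Since (x - c)^2 == (c - x)^2 exactly (IEEE-754 negation is exact), the
    // reversed variant computes identical values; it presumably exists for
    // uniformity with genuinely non-commutative pairs such as Sub/RSub and
    // Div/RDiv in the same vbinary framework.
    static void ref_sqrdiffc(size_t n, const float* a, float c, float* out) {
      for (size_t i = 0; i < n; i++) {
        const float d = a[i] - c;
        out[i] = d * d;
      }
    }
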
diff --git a/test/f32-vsqrdiff.cc b/test/f32-vsqrdiff.cc
new file mode 100644
index 0000000..2628da6
--- /dev/null
+++ b/test/f32-vsqrdiff.cc
@@ -0,0 +1,1025 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f32-vsqrdiff.yaml
+//   Generator: tools/generate-vbinary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vbinary.h>
+#include "vbinary-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VSQRDIFF__NEON_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON;
+    VBinOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X4, inplace_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X4, inplace_b) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X4, inplace_a_and_b) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VSQRDIFF__NEON_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    VBinOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X8, inplace_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X8, inplace_b) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__NEON_X8, inplace_a_and_b) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__neon_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFF__SSE_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    VBinOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X4, inplace_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X4, inplace_b) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X4, inplace_a_and_b) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x4, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFF__SSE_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE;
+    VBinOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X8, inplace_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X8, inplace_b) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__SSE_X8, inplace_a_and_b) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__sse_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFF__AVX_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX;
+    VBinOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X8, inplace_a) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X8, inplace_b) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X8, inplace_a_and_b) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x8, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFF__AVX_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX;
+    VBinOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X16, inplace_a) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X16, inplace_b) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX_X16, inplace_a_and_b) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFF__AVX512F_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    VBinOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X16, inplace_a) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X16, inplace_b) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X16, inplace_a_and_b) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x16, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFF__AVX512F_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    VBinOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X32, inplace_a) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X32, inplace_b) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__AVX512F_X32, inplace_a_and_b) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__avx512f_x32, VBinOpMicrokernelTester::OpType::SqrDiff);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+  TEST(F32_VSQRDIFF__PSIMD_X4, batch_eq_4) {
+    TEST_REQUIRES_PSIMD;
+    VBinOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X4, batch_div_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X4, batch_lt_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X4, batch_gt_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X4, inplace_a) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X4, inplace_b) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X4, inplace_a_and_b) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+
+
+#if !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+  TEST(F32_VSQRDIFF__PSIMD_X8, batch_eq_8) {
+    TEST_REQUIRES_PSIMD;
+    VBinOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X8, batch_div_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X8, batch_lt_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X8, batch_gt_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X8, inplace_a) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X8, inplace_b) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__PSIMD_X8, inplace_a_and_b) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__psimd_x8, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VSQRDIFF__WASM_X1, batch_eq_1) {
+    VBinOpMicrokernelTester()
+      .batch_size(1)
+      .Test(xnn_f32_vsqrdiff_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X1, batch_gt_1) {
+    for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X1, inplace_a) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X1, inplace_b) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X1, inplace_a_and_b) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VSQRDIFF__WASM_X2, batch_eq_2) {
+    VBinOpMicrokernelTester()
+      .batch_size(2)
+      .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X2, batch_div_2) {
+    for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X2, batch_lt_2) {
+    for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X2, batch_gt_2) {
+    for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X2, inplace_a) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X2, inplace_b) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X2, inplace_a_and_b) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VSQRDIFF__WASM_X4, batch_eq_4) {
+    VBinOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X4, inplace_a) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X4, inplace_b) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFF__WASM_X4, inplace_a_and_b) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace_a(true)
+        .inplace_b(true)
+        .Test(xnn_f32_vsqrdiff_ukernel__wasm_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+TEST(F32_VSQRDIFF__SCALAR_X1, batch_eq_1) {
+  VBinOpMicrokernelTester()
+    .batch_size(1)
+    .Test(xnn_f32_vsqrdiff_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X1, batch_gt_1) {
+  for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X1, inplace_a) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_a(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X1, inplace_b) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_b(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X1, inplace_a_and_b) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_a(true)
+      .inplace_b(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x1, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+
+TEST(F32_VSQRDIFF__SCALAR_X2, batch_eq_2) {
+  VBinOpMicrokernelTester()
+    .batch_size(2)
+    .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X2, batch_div_2) {
+  for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X2, batch_lt_2) {
+  for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X2, batch_gt_2) {
+  for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X2, inplace_a) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_a(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X2, inplace_b) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_b(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X2, inplace_a_and_b) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_a(true)
+      .inplace_b(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x2, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+
+TEST(F32_VSQRDIFF__SCALAR_X4, batch_eq_4) {
+  VBinOpMicrokernelTester()
+    .batch_size(4)
+    .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X4, batch_div_4) {
+  for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X4, batch_lt_4) {
+  for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X4, batch_gt_4) {
+  for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X4, inplace_a) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_a(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X4, inplace_b) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_b(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFF__SCALAR_X4, inplace_a_and_b) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VBinOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace_a(true)
+      .inplace_b(true)
+      .Test(xnn_f32_vsqrdiff_ukernel__scalar_x4, VBinOpMicrokernelTester::OpType::SqrDiff, VBinOpMicrokernelTester::Variant::Scalar);
+  }
+}
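
The tests above all check the same reference semantics: elementwise squared
difference, y[i] = (a[i] - b[i])^2. As a minimal sketch of the shape such a
scalar kernel presumably takes (the helper name is hypothetical; the real
generated kernels additionally take a params pointer and unroll by the batch
tile, and n is assumed to be a byte count, matching the library's convention):

  #include <stddef.h>

  /* Hypothetical sketch of an x1 squared-difference kernel. */
  static void sqrdiff_f32_scalar_x1_sketch(
      size_t n,          /* byte count, a multiple of sizeof(float) */
      const float* a,
      const float* b,
      float* y)
  {
    for (; n >= sizeof(float); n -= sizeof(float)) {
      const float vdiff = *a++ - *b++;  /* subtract first... */
      *y++ = vdiff * vdiff;             /* ...then square    */
    }
  }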
diff --git a/test/f32-vsqrdiff.yaml b/test/f32-vsqrdiff.yaml
new file mode 100644
index 0000000..24cc8cf
--- /dev/null
+++ b/test/f32-vsqrdiff.yaml
@@ -0,0 +1,20 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_f32_vsqrdiff_ukernel__neon_x4
+- name: xnn_f32_vsqrdiff_ukernel__neon_x8
+- name: xnn_f32_vsqrdiff_ukernel__sse_x4
+- name: xnn_f32_vsqrdiff_ukernel__sse_x8
+- name: xnn_f32_vsqrdiff_ukernel__avx_x8
+- name: xnn_f32_vsqrdiff_ukernel__avx_x16
+- name: xnn_f32_vsqrdiff_ukernel__avx512f_x16
+- name: xnn_f32_vsqrdiff_ukernel__avx512f_x32
+- name: xnn_f32_vsqrdiff_ukernel__psimd_x4
+- name: xnn_f32_vsqrdiff_ukernel__psimd_x8
+- name: xnn_f32_vsqrdiff_ukernel__wasm_x1
+- name: xnn_f32_vsqrdiff_ukernel__wasm_x2
+- name: xnn_f32_vsqrdiff_ukernel__wasm_x4
+- name: xnn_f32_vsqrdiff_ukernel__scalar_x1
+- name: xnn_f32_vsqrdiff_ukernel__scalar_x2
+- name: xnn_f32_vsqrdiff_ukernel__scalar_x4
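
The batch_size sweeps in the generated tests above follow directly from each
kernel's batch tile (the _xN suffix). Inferred bounds for a tile T, read off
the tests rather than the generator (the T = 1 kernels get only the batch_eq,
batch_gt, and inplace sweeps, as in the scalar_x1 and wasm_x1 cases):

  const size_t T = 4;  /* batch tile, e.g. the _x4 kernels           */
  /* batch_eq : exactly T                                            */
  /* batch_div: 2*T, 3*T, ..., 9*T       (8, 12, ..., 36 for T = 4)  */
  /* batch_lt : 1 .. T-1                                             */
  /* batch_gt : T+1 .. 2*T-1                                         */
  /* inplace  : 1 .. 5*T, step max(1, T-1)                           */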
diff --git a/test/f32-vsqrdiffc.cc b/test/f32-vsqrdiffc.cc
new file mode 100644
index 0000000..7c0feb1
--- /dev/null
+++ b/test/f32-vsqrdiffc.cc
@@ -0,0 +1,701 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f32-vsqrdiffc.yaml
+//   Generator: tools/generate-vbinary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vbinary.h>
+#include "vbinaryc-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VSQRDIFFC__NEON_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON;
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X4, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VSQRDIFFC__NEON_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__NEON_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__neon_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFFC__SSE_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X4, inplace) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFFC__SSE_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__SSE_X8, inplace) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__sse_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFFC__AVX_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X8, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFFC__AVX_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX;
+    VBinOpCMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX_X16, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFFC__AVX512F_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    VBinOpCMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X16, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x16, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VSQRDIFFC__AVX512F_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    VBinOpCMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__AVX512F_X32, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__avx512f_x32, VBinOpCMicrokernelTester::OpType::SqrDiffC);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+  TEST(F32_VSQRDIFFC__PSIMD_X4, batch_eq_4) {
+    TEST_REQUIRES_PSIMD;
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X4, batch_div_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X4, batch_lt_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X4, batch_gt_4) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X4, inplace) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+
+
+#if !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+  TEST(F32_VSQRDIFFC__PSIMD_X8, batch_eq_8) {
+    TEST_REQUIRES_PSIMD;
+    VBinOpCMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X8, batch_div_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X8, batch_lt_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X8, batch_gt_8) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__PSIMD_X8, inplace) {
+    TEST_REQUIRES_PSIMD;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__psimd_x8, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // !XNN_ARCH_ASMJS && !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VSQRDIFFC__WASM_X1, batch_eq_1) {
+    VBinOpCMicrokernelTester()
+      .batch_size(1)
+      .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X1, batch_gt_1) {
+    for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X1, inplace) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x1, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VSQRDIFFC__WASM_X2, batch_eq_2) {
+    VBinOpCMicrokernelTester()
+      .batch_size(2)
+      .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X2, batch_div_2) {
+    for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X2, batch_lt_2) {
+    for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X2, batch_gt_2) {
+    for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X2, inplace) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+#if XNN_ARCH_WASM
+  TEST(F32_VSQRDIFFC__WASM_X4, batch_eq_4) {
+    VBinOpCMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+
+  TEST(F32_VSQRDIFFC__WASM_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VBinOpCMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vsqrdiffc_ukernel__wasm_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+    }
+  }
+#endif  // XNN_ARCH_WASM
+
+
+TEST(F32_VSQRDIFFC__SCALAR_X1, batch_eq_1) {
+  VBinOpCMicrokernelTester()
+    .batch_size(1)
+    .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X1, batch_gt_1) {
+  for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X1, inplace) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x1, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+
+TEST(F32_VSQRDIFFC__SCALAR_X2, batch_eq_2) {
+  VBinOpCMicrokernelTester()
+    .batch_size(2)
+    .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X2, batch_div_2) {
+  for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X2, batch_lt_2) {
+  for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X2, batch_gt_2) {
+  for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X2, inplace) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x2, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+
+TEST(F32_VSQRDIFFC__SCALAR_X4, batch_eq_4) {
+  VBinOpCMicrokernelTester()
+    .batch_size(4)
+    .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X4, batch_div_4) {
+  for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X4, batch_lt_4) {
+  for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X4, batch_gt_4) {
+  for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VSQRDIFFC__SCALAR_X4, inplace) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VBinOpCMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vsqrdiffc_ukernel__scalar_x4, VBinOpCMicrokernelTester::OpType::SqrDiffC, VBinOpCMicrokernelTester::Variant::Scalar);
+  }
+}
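
Unlike the elementwise tests earlier in this change, the vsqrdiffc tests
exercise a single inplace mode: with a constant second operand there is no b
buffer to alias, so only output-over-a aliasing is meaningful. A sketch of the
constant-operand shape, under the assumption (matching the other *c
microkernels) that b points at a single value broadcast across the batch
(helper name hypothetical):

  #include <stddef.h>

  /* Hypothetical sketch of an x1 constant-operand kernel. */
  static void sqrdiffc_f32_scalar_x1_sketch(
      size_t n,          /* byte count, a multiple of sizeof(float) */
      const float* a,
      const float* b,    /* one value, broadcast across the batch   */
      float* y)
  {
    const float vb = *b;
    for (; n >= sizeof(float); n -= sizeof(float)) {
      const float vdiff = *a++ - vb;
      *y++ = vdiff * vdiff;
    }
  }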
diff --git a/test/f32-vsqrdiffc.yaml b/test/f32-vsqrdiffc.yaml
new file mode 100644
index 0000000..1068c71
--- /dev/null
+++ b/test/f32-vsqrdiffc.yaml
@@ -0,0 +1,20 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_f32_vsqrdiffc_ukernel__neon_x4
+- name: xnn_f32_vsqrdiffc_ukernel__neon_x8
+- name: xnn_f32_vsqrdiffc_ukernel__sse_x4
+- name: xnn_f32_vsqrdiffc_ukernel__sse_x8
+- name: xnn_f32_vsqrdiffc_ukernel__avx_x8
+- name: xnn_f32_vsqrdiffc_ukernel__avx_x16
+- name: xnn_f32_vsqrdiffc_ukernel__avx512f_x16
+- name: xnn_f32_vsqrdiffc_ukernel__avx512f_x32
+- name: xnn_f32_vsqrdiffc_ukernel__psimd_x4
+- name: xnn_f32_vsqrdiffc_ukernel__psimd_x8
+- name: xnn_f32_vsqrdiffc_ukernel__wasm_x1
+- name: xnn_f32_vsqrdiffc_ukernel__wasm_x2
+- name: xnn_f32_vsqrdiffc_ukernel__wasm_x4
+- name: xnn_f32_vsqrdiffc_ukernel__scalar_x1
+- name: xnn_f32_vsqrdiffc_ukernel__scalar_x2
+- name: xnn_f32_vsqrdiffc_ukernel__scalar_x4
diff --git a/test/vbinary-microkernel-tester.h b/test/vbinary-microkernel-tester.h
index ca70b71..f7fc0e9 100644
--- a/test/vbinary-microkernel-tester.h
+++ b/test/vbinary-microkernel-tester.h
@@ -31,6 +31,7 @@
     Min,
     Mul,
     Sub,
+    SqrDiff,
   };
 
   enum class Variant {
@@ -132,6 +133,12 @@
           case OpType::Mul:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b_data[i]);
             break;
+          case OpType::SqrDiff:
+          {
+            const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::Sub:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
             break;
@@ -188,6 +195,12 @@
           case OpType::Mul:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b_data[i]);
             break;
+          case OpType::SqrDiff:
+          {
+            const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::Sub:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
             break;
@@ -261,6 +274,12 @@
           case OpType::Mul:
             y_ref[i] = a_data[i] * b_data[i];
             break;
+          case OpType::SqrDiff:
+          {
+            const float diff = a_data[i] - b_data[i];
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::Sub:
             y_ref[i] = a_data[i] - b_data[i];
             break;
@@ -316,6 +335,12 @@
           case OpType::Mul:
             y_ref[i] = a_data[i] * b_data[i];
             break;
+          case OpType::SqrDiff:
+          {
+            const float diff = a_data[i] - b_data[i];
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::Sub:
             y_ref[i] = a_data[i] - b_data[i];
             break;
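
One stylistic point in the hunks above: SqrDiff is the only case in these
switches that declares a local (diff), and C++ forbids jumping past an
initialized variable into its scope from another case label, so the case body
is wrapped in braces to confine that scope. The pattern from the diff, reduced
to a fragment:

  switch (op_type()) {
    case OpType::SqrDiff:
    {                                         /* braces open a scope...       */
      const float diff = a_data[i] - b_data[i];
      y_ref[i] = diff * diff;                 /* ...so diff stays case-local  */
      break;
    }
    case OpType::Sub:
      y_ref[i] = a_data[i] - b_data[i];
      break;
  }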
diff --git a/test/vbinaryc-microkernel-tester.h b/test/vbinaryc-microkernel-tester.h
index 6297259..ac43806 100644
--- a/test/vbinaryc-microkernel-tester.h
+++ b/test/vbinaryc-microkernel-tester.h
@@ -31,6 +31,8 @@
     MaxC,
     MinC,
     MulC,
+    SqrDiffC,
+    RSqrDiffC,
     SubC,
     RSubC,
   };
@@ -126,6 +128,18 @@
           case OpType::MulC:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b);
             break;
+          case OpType::SqrDiffC:
+          {
+            const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
+            y_ref[i] = diff * diff;
+            break;
+          }
+          case OpType::RSqrDiffC:
+          {
+            const float diff = fp16_ieee_to_fp32_value(b) - fp16_ieee_to_fp32_value(a_data[i]);
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::SubC:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
             break;
@@ -185,6 +199,18 @@
           case OpType::MulC:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b);
             break;
+          case OpType::SqrDiffC:
+          {
+            const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
+            y_ref[i] = diff * diff;
+            break;
+          }
+          case OpType::RSqrDiffC:
+          {
+            const float diff = fp16_ieee_to_fp32_value(b) - fp16_ieee_to_fp32_value(a_data[i]);
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::SubC:
             y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
             break;
@@ -261,6 +287,18 @@
           case OpType::MulC:
             y_ref[i] = a_data[i] * b;
             break;
+          case OpType::SqrDiffC:
+          {
+            const float diff = a_data[i] - b;
+            y_ref[i] = diff * diff;
+            break;
+          }
+          case OpType::RSqrDiffC:
+          {
+            const float diff = b - a_data[i];
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::SubC:
             y_ref[i] = a_data[i] - b;
             break;
@@ -319,6 +357,18 @@
           case OpType::MulC:
             y_ref[i] = a_data[i] * b;
             break;
+          case OpType::SqrDiffC:
+          {
+            const float diff = a_data[i] - b;
+            y_ref[i] = diff * diff;
+            break;
+          }
+          case OpType::RSqrDiffC:
+          {
+            const float diff = b - a_data[i];
+            y_ref[i] = diff * diff;
+            break;
+          }
           case OpType::SubC:
             y_ref[i] = a_data[i] - b;
             break;
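
Note that the SqrDiffC and RSqrDiffC reference paths above differ only in
operand order, and since (a - b)^2 == (b - a)^2 they compute the same value
for non-NaN inputs; the reversed form presumably exists for symmetry with
SubC/RSubC and DivC/RDivC, where operand order does matter. A one-line sanity
check of the identity:

  /* Negating a difference does not change its square. */
  assert((a - b) * (a - b) == (b - a) * (b - a));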
diff --git a/tools/generate-vbinary-test.py b/tools/generate-vbinary-test.py
index 5e00d7c..4a0cc04 100755
--- a/tools/generate-vbinary-test.py
+++ b/tools/generate-vbinary-test.py
@@ -27,7 +27,7 @@
 
 
 def split_ukernel_name(name):
-  match = re.match(r"^xnn_(f16|f32)_v(add|div|max|min|mul|sub|addc|divc|rdivc|maxc|minc|mulc|subc|rsubc)(_(minmax))?_ukernel__(.+)_x(\d+)$", name)
+  match = re.match(r"^xnn_(f16|f32)_v(add|div|max|min|mul|sqrdiff|sub|addc|divc|rdivc|maxc|minc|mulc|sqrdiffc|rsqrdiffc|subc|rsubc)(_(minmax))?_ukernel__(.+)_x(\d+)$", name)
   if match is None:
     raise ValueError("Unexpected microkernel name: " + name)
   op_type = {
@@ -36,6 +36,7 @@
     "max": "Max",
     "min": "Min",
     "mul": "Mul",
+    "sqrdiff": "SqrDiff",
     "sub": "Sub",
     "addc": "AddC",
     "divc": "DivC",
@@ -43,13 +44,21 @@
     "maxc": "MaxC",
     "minc": "MinC",
     "mulc": "MulC",
+    "sqrdiffc": "SqrDiffC",
+    "rsqrdiffc": "RSqrDiffC",
     "subc": "SubC",
     "rsubc": "RSubC",
   }[match.group(2)]
   batch_tile = int(match.group(6))
 
+  activation_type = match.group(4)
+  if activation_type is None:
+    activation_type = "LINEAR"
+  else:
+    activation_type = activation_type.upper()
+
   arch, isa = xnncommon.parse_target_name(target_name=match.group(5))
-  return op_type, batch_tile, arch, isa
+  return op_type, activation_type, batch_tile, arch, isa
 
 
 BINOP_TEST_TEMPLATE = """\
@@ -138,36 +147,38 @@
     }
   }
 
-TEST(${TEST_NAME}, qmin) {
-  $if ISA_CHECK:
-    ${ISA_CHECK};
-  for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
-    ${TESTER}()
-      .batch_size(batch_size)
-      .qmin(128)
-      .Test(${", ".join(TEST_ARGS)});
+$if ACTIVATION_TYPE == "MINMAX":
+  TEST(${TEST_NAME}, qmin) {
+    $if ISA_CHECK:
+      ${ISA_CHECK};
+    for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
+      ${TESTER}()
+        .batch_size(batch_size)
+        .qmin(128)
+        .Test(${", ".join(TEST_ARGS)});
+    }
   }
-}
 
-TEST(${TEST_NAME}, qmax) {
-  $if ISA_CHECK:
-    ${ISA_CHECK};
-  for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
-    ${TESTER}()
-      .batch_size(batch_size)
-      .qmax(128)
-      .Test(${", ".join(TEST_ARGS)});
+  TEST(${TEST_NAME}, qmax) {
+    $if ISA_CHECK:
+      ${ISA_CHECK};
+    for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
+      ${TESTER}()
+        .batch_size(batch_size)
+        .qmax(128)
+        .Test(${", ".join(TEST_ARGS)});
+    }
   }
-}
 """
 
 
-def generate_test_cases(ukernel, op_type, batch_tile, isa):
+def generate_test_cases(ukernel, op_type, activation_type, batch_tile, isa):
   """Generates all tests cases for a Vector Binary Operation micro-kernel.
 
   Args:
     ukernel: C name of the micro-kernel function.
     op_type: Operation type (ADD/MUL/SUB/etc).
+    activation_type: Activation type (LINEAR/MINMAX/RELU).
     batch_tile: Number of batch elements processed per one iteration of the
                 inner loop of the micro-kernel.
     isa: instruction set required to run the micro-kernel. Generated unit test
@@ -192,6 +203,7 @@
       "DATATYPE": datatype,
       "BATCH_TILE": batch_tile,
       "OP_TYPE": op_type,
+      "ACTIVATION_TYPE": activation_type,
       "ISA_CHECK": xnncommon.generate_isa_check_macro(isa),
     })
 
@@ -232,12 +244,13 @@
 
     for ukernel_spec in spec_yaml:
       name = ukernel_spec["name"]
-      op_type, batch_tile, arch, isa = split_ukernel_name(name)
+      op_type, activation_type, batch_tile, arch, isa = split_ukernel_name(name)
 
       # specification can override architecture
       arch = ukernel_spec.get("arch", arch)
 
-      test_case = generate_test_cases(name, op_type, batch_tile, isa)
+      test_case = generate_test_cases(name, op_type, activation_type,
+                                      batch_tile, isa)
       tests += "\n\n" + xnncommon.postprocess_test_case(test_case, arch, isa)
 
     with codecs.open(options.output, "w", encoding="utf-8") as output_file:
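
A worked example of the extended regex and op-type table, applied to two of
the spec names from the yaml files above (group 4, the optional _minmax
suffix, is absent in both, so the activation defaults to LINEAR):

  /* "xnn_f32_vsqrdiff_ukernel__neon_x8"
       datatype = f32, op = sqrdiff  -> "SqrDiff",  activation = LINEAR,
       target = neon,    batch_tile = 8                                  */
  /* "xnn_f32_vsqrdiffc_ukernel__avx512f_x32"
       datatype = f32, op = sqrdiffc -> "SqrDiffC", activation = LINEAR,
       target = avx512f, batch_tile = 32                                 */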