SQRDIFF (Squared Difference) microkernels

PiperOrigin-RevId: 314881351
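
This change introduces SQRDIFF (squared difference, y = (a - b)^2) microkernels. Every hunk below touches only generated f32-vbinary sources and adds a single blank line; this appears to be whitespace-only churn from regenerating those files from the updated templates rather than a functional change to the existing ADD/DIV/MAX/MIN/MUL kernels.

For reference, here is a minimal sketch of what a scalar SQRDIFF microkernel computes, written in the style of the other f32-vbinary scalar kernels visible in this diff. The kernel name, signature, and loop structure are illustrative assumptions, not the contents of the actual generated file:

    // Hypothetical sketch, not the actual generated source: a scalar x2
    // SQRDIFF microkernel following the conventions of the other
    // f32-vbinary scalar kernels shown in this diff.
    #include <assert.h>
    #include <stddef.h>

    void xnn_f32_vsqrdiff_ukernel__scalar_x2(
        size_t n,
        const float* a,
        const float* b,
        float* y)
    {
      assert(n != 0);
      assert(n % sizeof(float) == 0);

      for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
        const float va0 = a[0];
        const float va1 = a[1];
        a += 2;

        const float vb0 = b[0];
        const float vb1 = b[1];
        b += 2;

        // Squared difference: vy = (va - vb) * (va - vb).
        float vy0 = va0 - vb0;
        float vy1 = va1 - vb1;
        vy0 = vy0 * vy0;
        vy1 = vy1 * vy1;

        y[0] = vy0;
        y[1] = vy1;
        y += 2;
      }
      if (n != 0) {
        // Remainder: one leftover element.
        const float va = *a;
        const float vb = *b;
        const float vy = va - vb;
        *y = vy * vy;
      }
    }

Unlike the minmax kernels below, SQRDIFF has no output clamping, so no vy_min/vy_max parameters appear in the sketch.
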
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx-x16.c b/src/f32-vbinary/gen/vadd-minmax-avx-x16.c
index b799115..2722f82 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_add_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx-x8.c b/src/f32-vbinary/gen/vadd-minmax-avx-x8.c
index cd3fd03..6292a1e 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
index 27b9960..0524e99 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
index ecb71ab..f33cefb 100644
--- a/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vadd-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_add_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-neon-x4.c b/src/f32-vbinary/gen/vadd-minmax-neon-x4.c
index f0f9ed5..ddca8e2 100644
--- a/src/f32-vbinary/gen/vadd-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vaddq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-neon-x8.c b/src/f32-vbinary/gen/vadd-minmax-neon-x8.c
index b2e074f..9f4ff35 100644
--- a/src/f32-vbinary/gen/vadd-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vaddq_f32(va0123, vb0123);
     float32x4_t vy4567 = vaddq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c b/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
index 95addcf..7a8e7dd 100644
--- a/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c b/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
index 5fef417..6de0617 100644
--- a/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_add_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c b/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
index 63093fd..0bfbd55 100644
--- a/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vadd-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 + vb0;
     float vy1 = va1 + vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c b/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
index 2e182e0..8fbabcb 100644
--- a/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 + vb2;
     float vy3 = va3 + vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vadd-minmax-sse-x4.c b/src/f32-vbinary/gen/vadd-minmax-sse-x4.c
index a1079d3..0990f78 100644
--- a/src/f32-vbinary/gen/vadd-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_add_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vadd-minmax-sse-x8.c b/src/f32-vbinary/gen/vadd-minmax-sse-x8.c
index e661dc4..c6f9ac6 100644
--- a/src/f32-vbinary/gen/vadd-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vadd-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_add_ps(va0123, vb0123);
     __m128 vy4567 = _mm_add_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c b/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
index 6e0a742..67c3c2a 100644
--- a/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vadd-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 + vb0;
     float vy1 = va1 + vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c b/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
index 27219bb..ddbafca 100644
--- a/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vadd-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 + vb2;
     float vy3 = va3 + vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c b/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
index cdbf270..140c3dc 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_add_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c b/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
index 2d95818..f18a121 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_add_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
index 7536546..7407b41 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
index 4eb57d8..c0cb054 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_add_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_add_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c b/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
index d68aa6f..d1b1d38 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vaddq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c b/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
index b2f16d6..3c3fe34 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vaddq_f32(va0123, vb);
     float32x4_t vy4567 = vaddq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
index 68f2b44..c9e4231 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
index 9dfb664..c2f1f24 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_add_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_add_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
index d9ff3cf..e183f22 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 + vb;
     float vy1 = va1 + vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
index 7652791..51cb7e6 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 + vb;
     float vy3 = va3 + vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c b/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
index 57181d4..1eb07d4 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_add_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c b/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
index bfd25cd..a169ca6 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_add_ps(va0123, vb);
     __m128 vy4567 = _mm_add_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
index 78dc878..44c66cc 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 + vb;
     float vy1 = va1 + vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
index 5c958c3..def140a 100644
--- a/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vaddc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 + vb;
     float vy3 = va3 + vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c b/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
index e2e6f6d..134ad9a 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_div_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c b/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
index a4e3283..0f01664 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
index bd602b6..61f290b 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
index 784d575..52ef1da 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_div_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c b/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
index a905e67..bb8c8d3 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vdivq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c b/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
index 687682e..1ad0784 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vdivq_f32(va0123, vb0123);
     float32x4_t vy4567 = vdivq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c b/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
index 48d73bb..6186946 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c b/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
index 184c35f..e5016cc 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_div_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c b/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
index b1b9e4a..824c2bd 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 / vb0;
     float vy1 = va1 / vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c b/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
index 2effa49..3d6a8cd 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 / vb2;
     float vy3 = va3 / vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c b/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
index d9df6d6..2c69c00 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_div_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c b/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
index deea4ce..42e11c6 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_div_ps(va0123, vb0123);
     __m128 vy4567 = _mm_div_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c b/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
index 9f045b1..f7a5cce 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 / vb0;
     float vy1 = va1 / vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c b/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
index e472e86..212b0d6 100644
--- a/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vdiv-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 / vb2;
     float vy3 = va3 / vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c b/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
index 6411097..945916c 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_div_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c b/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
index 9a51c51..e612f0f 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_div_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
index 9a90ed3..1163351 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
index 4f535fe..1a84a0e 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_div_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_div_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c b/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
index d669455..5bb8b5f 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vdivq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c b/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
index c65cf67..1517447 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vdivq_f32(va0123, vb);
     float32x4_t vy4567 = vdivq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
index 4334692..409a945 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
index a73a171..9f05b23 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_div_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_div_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
index 520b555..954af51 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 / vb;
     float vy1 = va1 / vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
index 113cdbf..0d05701 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 / vb;
     float vy3 = va3 / vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c b/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
index 6784a57..0ab1ff9 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_div_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c b/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
index 033a817..7d36f03 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_div_ps(va0123, vb);
     __m128 vy4567 = _mm_div_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
index 41363d6..eafeef2 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 / vb;
     float vy1 = va1 / vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
index 6e36564..f46d880 100644
--- a/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vdivc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 / vb;
     float vy3 = va3 / vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmax-avx-x16.c b/src/f32-vbinary/gen/vmax-avx-x16.c
index 438f0bb..df889aa 100644
--- a/src/f32-vbinary/gen/vmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vmax-avx-x16.c
@@ -41,6 +41,7 @@
     __m256 vy89ABCDEF = _mm256_max_ps(va89ABCDEF, vb89ABCDEF);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vmax-avx-x8.c b/src/f32-vbinary/gen/vmax-avx-x8.c
index 92de68f..df7c785 100644
--- a/src/f32-vbinary/gen/vmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vmax-avx-x8.c
@@ -38,6 +38,7 @@
     __m256 vy01234567 = _mm256_max_ps(va01234567, vb01234567);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vmax-avx512f-x16.c b/src/f32-vbinary/gen/vmax-avx512f-x16.c
index c5d324d..505a792 100644
--- a/src/f32-vbinary/gen/vmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmax-avx512f-x16.c
@@ -37,6 +37,7 @@
     __m512 vy0123456789ABCDEF = _mm512_max_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vmax-avx512f-x32.c b/src/f32-vbinary/gen/vmax-avx512f-x32.c
index 469bf65..26768c8 100644
--- a/src/f32-vbinary/gen/vmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmax-avx512f-x32.c
@@ -40,6 +40,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vmax-neon-x4.c b/src/f32-vbinary/gen/vmax-neon-x4.c
index 2dddd9c..ebbc034 100644
--- a/src/f32-vbinary/gen/vmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vmax-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vmaxq_f32(va0123, vb0123);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vmax-neon-x8.c b/src/f32-vbinary/gen/vmax-neon-x8.c
index f8a5ce1..79fc7a2 100644
--- a/src/f32-vbinary/gen/vmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy4567 = vmaxq_f32(va4567, vb4567);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmax-psimd-x4.c b/src/f32-vbinary/gen/vmax-psimd-x4.c
index ae20426..001490d 100644
--- a/src/f32-vbinary/gen/vmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmax-psimd-x4.c
@@ -36,6 +36,7 @@
     psimd_f32 vy0123 = psimd_max_f32(va0123, vb0123);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmax-psimd-x8.c b/src/f32-vbinary/gen/vmax-psimd-x8.c
index af455a9..f6396e0 100644
--- a/src/f32-vbinary/gen/vmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmax-psimd-x8.c
@@ -39,6 +39,7 @@
     psimd_f32 vy4567 = psimd_max_f32(va4567, vb4567);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmax-scalar-x2.c b/src/f32-vbinary/gen/vmax-scalar-x2.c
index 59315bb..3c41548 100644
--- a/src/f32-vbinary/gen/vmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmax-scalar-x2.c
@@ -38,6 +38,7 @@
     float vy1 = math_max_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmax-scalar-x4.c b/src/f32-vbinary/gen/vmax-scalar-x4.c
index 0f3102c..729cbd8 100644
--- a/src/f32-vbinary/gen/vmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmax-scalar-x4.c
@@ -44,6 +44,7 @@
     float vy3 = math_max_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmax-sse-x4.c b/src/f32-vbinary/gen/vmax-sse-x4.c
index 104446c..41be09c 100644
--- a/src/f32-vbinary/gen/vmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vmax-sse-x4.c
@@ -37,6 +37,7 @@
     __m128 vy0123 = _mm_max_ps(va0123, vb0123);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmax-sse-x8.c b/src/f32-vbinary/gen/vmax-sse-x8.c
index 225873f..095f75f 100644
--- a/src/f32-vbinary/gen/vmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vmax-sse-x8.c
@@ -40,6 +40,7 @@
     __m128 vy4567 = _mm_max_ps(va4567, vb4567);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmax-wasm-x2.c b/src/f32-vbinary/gen/vmax-wasm-x2.c
index 70bb55d..813b12e 100644
--- a/src/f32-vbinary/gen/vmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmax-wasm-x2.c
@@ -38,6 +38,7 @@
     float vy1 = __builtin_wasm_max_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmax-wasm-x4.c b/src/f32-vbinary/gen/vmax-wasm-x4.c
index f8d36cc..556703d 100644
--- a/src/f32-vbinary/gen/vmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmax-wasm-x4.c
@@ -44,6 +44,7 @@
     float vy3 = __builtin_wasm_max_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmaxc-avx-x16.c b/src/f32-vbinary/gen/vmaxc-avx-x16.c
index c2d7ce7..ead7d47 100644
--- a/src/f32-vbinary/gen/vmaxc-avx-x16.c
+++ b/src/f32-vbinary/gen/vmaxc-avx-x16.c
@@ -38,6 +38,7 @@
     __m256 vy89ABCDEF = _mm256_max_ps(va89ABCDEF, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vmaxc-avx-x8.c b/src/f32-vbinary/gen/vmaxc-avx-x8.c
index 0c3c83f..91f55d2 100644
--- a/src/f32-vbinary/gen/vmaxc-avx-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-avx-x8.c
@@ -36,6 +36,7 @@
     __m256 vy01234567 = _mm256_max_ps(va01234567, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-avx512f-x16.c b/src/f32-vbinary/gen/vmaxc-avx512f-x16.c
index dee5815..356d90d 100644
--- a/src/f32-vbinary/gen/vmaxc-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmaxc-avx512f-x16.c
@@ -35,6 +35,7 @@
     __m512 vy0123456789ABCDEF = _mm512_max_ps(va0123456789ABCDEF, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-avx512f-x32.c b/src/f32-vbinary/gen/vmaxc-avx512f-x32.c
index 5e4bf2a..aba2ef5 100644
--- a/src/f32-vbinary/gen/vmaxc-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmaxc-avx512f-x32.c
@@ -37,6 +37,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vaGHIJKLMNOPQRSTUV, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vmaxc-neon-x4.c b/src/f32-vbinary/gen/vmaxc-neon-x4.c
index e94c918..a996ab1 100644
--- a/src/f32-vbinary/gen/vmaxc-neon-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vmaxq_f32(va0123, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vmaxc-neon-x8.c b/src/f32-vbinary/gen/vmaxc-neon-x8.c
index 8132011..c246fae 100644
--- a/src/f32-vbinary/gen/vmaxc-neon-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-neon-x8.c
@@ -35,6 +35,7 @@
     float32x4_t vy4567 = vmaxq_f32(va4567, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-psimd-x4.c b/src/f32-vbinary/gen/vmaxc-psimd-x4.c
index 3310f68..bc43c4f 100644
--- a/src/f32-vbinary/gen/vmaxc-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-psimd-x4.c
@@ -34,6 +34,7 @@
     psimd_f32 vy0123 = psimd_max_f32(va0123, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-psimd-x8.c b/src/f32-vbinary/gen/vmaxc-psimd-x8.c
index c74546a..9a6cf1d 100644
--- a/src/f32-vbinary/gen/vmaxc-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-psimd-x8.c
@@ -36,6 +36,7 @@
     psimd_f32 vy4567 = psimd_max_f32(va4567, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmaxc-scalar-x2.c b/src/f32-vbinary/gen/vmaxc-scalar-x2.c
index 1c46421..e9dc506 100644
--- a/src/f32-vbinary/gen/vmaxc-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmaxc-scalar-x2.c
@@ -35,6 +35,7 @@
     float vy1 = math_max_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmaxc-scalar-x4.c b/src/f32-vbinary/gen/vmaxc-scalar-x4.c
index f683c56..5459acf 100644
--- a/src/f32-vbinary/gen/vmaxc-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-scalar-x4.c
@@ -39,6 +39,7 @@
     float vy3 = math_max_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmaxc-sse-x4.c b/src/f32-vbinary/gen/vmaxc-sse-x4.c
index 72061bb..6f80551 100644
--- a/src/f32-vbinary/gen/vmaxc-sse-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-sse-x4.c
@@ -35,6 +35,7 @@
     __m128 vy0123 = _mm_max_ps(va0123, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmaxc-sse-x8.c b/src/f32-vbinary/gen/vmaxc-sse-x8.c
index dfb5bc2..98a45ca 100644
--- a/src/f32-vbinary/gen/vmaxc-sse-x8.c
+++ b/src/f32-vbinary/gen/vmaxc-sse-x8.c
@@ -37,6 +37,7 @@
     __m128 vy4567 = _mm_max_ps(va4567, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmaxc-wasm-x2.c b/src/f32-vbinary/gen/vmaxc-wasm-x2.c
index 4d7f50c..d80941e 100644
--- a/src/f32-vbinary/gen/vmaxc-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmaxc-wasm-x2.c
@@ -35,6 +35,7 @@
     float vy1 = __builtin_wasm_max_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmaxc-wasm-x4.c b/src/f32-vbinary/gen/vmaxc-wasm-x4.c
index 43f58fc..0fa1d17 100644
--- a/src/f32-vbinary/gen/vmaxc-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmaxc-wasm-x4.c
@@ -39,6 +39,7 @@
     float vy3 = __builtin_wasm_max_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmin-avx-x16.c b/src/f32-vbinary/gen/vmin-avx-x16.c
index 67d8eb9..046b03e 100644
--- a/src/f32-vbinary/gen/vmin-avx-x16.c
+++ b/src/f32-vbinary/gen/vmin-avx-x16.c
@@ -41,6 +41,7 @@
     __m256 vy89ABCDEF = _mm256_min_ps(va89ABCDEF, vb89ABCDEF);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vmin-avx-x8.c b/src/f32-vbinary/gen/vmin-avx-x8.c
index 728d2b7..a37be18 100644
--- a/src/f32-vbinary/gen/vmin-avx-x8.c
+++ b/src/f32-vbinary/gen/vmin-avx-x8.c
@@ -38,6 +38,7 @@
     __m256 vy01234567 = _mm256_min_ps(va01234567, vb01234567);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vmin-avx512f-x16.c b/src/f32-vbinary/gen/vmin-avx512f-x16.c
index 1feb387..b977956 100644
--- a/src/f32-vbinary/gen/vmin-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmin-avx512f-x16.c
@@ -37,6 +37,7 @@
     __m512 vy0123456789ABCDEF = _mm512_min_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vmin-avx512f-x32.c b/src/f32-vbinary/gen/vmin-avx512f-x32.c
index c3c2aca..894374c 100644
--- a/src/f32-vbinary/gen/vmin-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmin-avx512f-x32.c
@@ -40,6 +40,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_min_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vmin-neon-x4.c b/src/f32-vbinary/gen/vmin-neon-x4.c
index 0490b96..f4bfbd6 100644
--- a/src/f32-vbinary/gen/vmin-neon-x4.c
+++ b/src/f32-vbinary/gen/vmin-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vminq_f32(va0123, vb0123);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vmin-neon-x8.c b/src/f32-vbinary/gen/vmin-neon-x8.c
index ebd4855..2c936c3 100644
--- a/src/f32-vbinary/gen/vmin-neon-x8.c
+++ b/src/f32-vbinary/gen/vmin-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy4567 = vminq_f32(va4567, vb4567);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmin-psimd-x4.c b/src/f32-vbinary/gen/vmin-psimd-x4.c
index de2fe90..22cddb1 100644
--- a/src/f32-vbinary/gen/vmin-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmin-psimd-x4.c
@@ -36,6 +36,7 @@
     psimd_f32 vy0123 = psimd_min_f32(va0123, vb0123);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmin-psimd-x8.c b/src/f32-vbinary/gen/vmin-psimd-x8.c
index cc1f12e..da7fc6a 100644
--- a/src/f32-vbinary/gen/vmin-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmin-psimd-x8.c
@@ -39,6 +39,7 @@
     psimd_f32 vy4567 = psimd_min_f32(va4567, vb4567);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmin-scalar-x2.c b/src/f32-vbinary/gen/vmin-scalar-x2.c
index f2fc1a1..d4ee3a4 100644
--- a/src/f32-vbinary/gen/vmin-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmin-scalar-x2.c
@@ -38,6 +38,7 @@
     float vy1 = math_min_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmin-scalar-x4.c b/src/f32-vbinary/gen/vmin-scalar-x4.c
index 2d454ea..71fc93c 100644
--- a/src/f32-vbinary/gen/vmin-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmin-scalar-x4.c
@@ -44,6 +44,7 @@
     float vy3 = math_min_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmin-sse-x4.c b/src/f32-vbinary/gen/vmin-sse-x4.c
index 50e7733..a27399e 100644
--- a/src/f32-vbinary/gen/vmin-sse-x4.c
+++ b/src/f32-vbinary/gen/vmin-sse-x4.c
@@ -37,6 +37,7 @@
     __m128 vy0123 = _mm_min_ps(va0123, vb0123);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vmin-sse-x8.c b/src/f32-vbinary/gen/vmin-sse-x8.c
index e6fb094..500ed65 100644
--- a/src/f32-vbinary/gen/vmin-sse-x8.c
+++ b/src/f32-vbinary/gen/vmin-sse-x8.c
@@ -40,6 +40,7 @@
     __m128 vy4567 = _mm_min_ps(va4567, vb4567);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vmin-wasm-x2.c b/src/f32-vbinary/gen/vmin-wasm-x2.c
index 79cacbd..92e467a 100644
--- a/src/f32-vbinary/gen/vmin-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmin-wasm-x2.c
@@ -38,6 +38,7 @@
     float vy1 = __builtin_wasm_min_f32(va1, vb1);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vmin-wasm-x4.c b/src/f32-vbinary/gen/vmin-wasm-x4.c
index b63760a..75e23c9 100644
--- a/src/f32-vbinary/gen/vmin-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmin-wasm-x4.c
@@ -44,6 +44,7 @@
     float vy3 = __builtin_wasm_min_f32(va3, vb3);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vminc-avx-x16.c b/src/f32-vbinary/gen/vminc-avx-x16.c
index 4523f46..c15c0e3 100644
--- a/src/f32-vbinary/gen/vminc-avx-x16.c
+++ b/src/f32-vbinary/gen/vminc-avx-x16.c
@@ -38,6 +38,7 @@
     __m256 vy89ABCDEF = _mm256_min_ps(va89ABCDEF, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     _mm256_storeu_ps(y + 8, vy89ABCDEF);
     y += 16;
diff --git a/src/f32-vbinary/gen/vminc-avx-x8.c b/src/f32-vbinary/gen/vminc-avx-x8.c
index 5f09206..8807026 100644
--- a/src/f32-vbinary/gen/vminc-avx-x8.c
+++ b/src/f32-vbinary/gen/vminc-avx-x8.c
@@ -36,6 +36,7 @@
     __m256 vy01234567 = _mm256_min_ps(va01234567, vb);
 
 
+
     _mm256_storeu_ps(y, vy01234567);
     y += 8;
   }
diff --git a/src/f32-vbinary/gen/vminc-avx512f-x16.c b/src/f32-vbinary/gen/vminc-avx512f-x16.c
index 29317f7..9f1bb8f 100644
--- a/src/f32-vbinary/gen/vminc-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vminc-avx512f-x16.c
@@ -35,6 +35,7 @@
     __m512 vy0123456789ABCDEF = _mm512_min_ps(va0123456789ABCDEF, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     y += 16;
   }
diff --git a/src/f32-vbinary/gen/vminc-avx512f-x32.c b/src/f32-vbinary/gen/vminc-avx512f-x32.c
index dd313af..6a1ebb3 100644
--- a/src/f32-vbinary/gen/vminc-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vminc-avx512f-x32.c
@@ -37,6 +37,7 @@
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_min_ps(vaGHIJKLMNOPQRSTUV, vb);
 
 
+
     _mm512_storeu_ps(y, vy0123456789ABCDEF);
     _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
     y += 32;
diff --git a/src/f32-vbinary/gen/vminc-neon-x4.c b/src/f32-vbinary/gen/vminc-neon-x4.c
index d0663bf..0ccc583 100644
--- a/src/f32-vbinary/gen/vminc-neon-x4.c
+++ b/src/f32-vbinary/gen/vminc-neon-x4.c
@@ -33,6 +33,7 @@
     float32x4_t vy0123 = vminq_f32(va0123, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
   }
   for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
diff --git a/src/f32-vbinary/gen/vminc-neon-x8.c b/src/f32-vbinary/gen/vminc-neon-x8.c
index 3a6bcbc..517f13a 100644
--- a/src/f32-vbinary/gen/vminc-neon-x8.c
+++ b/src/f32-vbinary/gen/vminc-neon-x8.c
@@ -35,6 +35,7 @@
     float32x4_t vy4567 = vminq_f32(va4567, vb);
 
 
+
     vst1q_f32(y, vy0123); y += 4;
     vst1q_f32(y, vy4567); y += 4;
   }
diff --git a/src/f32-vbinary/gen/vminc-psimd-x4.c b/src/f32-vbinary/gen/vminc-psimd-x4.c
index a5481d1..7133aba 100644
--- a/src/f32-vbinary/gen/vminc-psimd-x4.c
+++ b/src/f32-vbinary/gen/vminc-psimd-x4.c
@@ -34,6 +34,7 @@
     psimd_f32 vy0123 = psimd_min_f32(va0123, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vminc-psimd-x8.c b/src/f32-vbinary/gen/vminc-psimd-x8.c
index 92d9603..4adfb23 100644
--- a/src/f32-vbinary/gen/vminc-psimd-x8.c
+++ b/src/f32-vbinary/gen/vminc-psimd-x8.c
@@ -36,6 +36,7 @@
     psimd_f32 vy4567 = psimd_min_f32(va4567, vb);
 
 
+
     psimd_store_f32(y, vy0123);
     psimd_store_f32(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vminc-scalar-x2.c b/src/f32-vbinary/gen/vminc-scalar-x2.c
index 9118c94..cea44df 100644
--- a/src/f32-vbinary/gen/vminc-scalar-x2.c
+++ b/src/f32-vbinary/gen/vminc-scalar-x2.c
@@ -35,6 +35,7 @@
     float vy1 = math_min_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vminc-scalar-x4.c b/src/f32-vbinary/gen/vminc-scalar-x4.c
index e6fa215..2f2deda 100644
--- a/src/f32-vbinary/gen/vminc-scalar-x4.c
+++ b/src/f32-vbinary/gen/vminc-scalar-x4.c
@@ -39,6 +39,7 @@
     float vy3 = math_min_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vminc-sse-x4.c b/src/f32-vbinary/gen/vminc-sse-x4.c
index 48084b2..ca36369 100644
--- a/src/f32-vbinary/gen/vminc-sse-x4.c
+++ b/src/f32-vbinary/gen/vminc-sse-x4.c
@@ -35,6 +35,7 @@
     __m128 vy0123 = _mm_min_ps(va0123, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     y += 4;
   }
diff --git a/src/f32-vbinary/gen/vminc-sse-x8.c b/src/f32-vbinary/gen/vminc-sse-x8.c
index 63f9bc5..7f4a11b 100644
--- a/src/f32-vbinary/gen/vminc-sse-x8.c
+++ b/src/f32-vbinary/gen/vminc-sse-x8.c
@@ -37,6 +37,7 @@
     __m128 vy4567 = _mm_min_ps(va4567, vb);
 
 
+
     _mm_storeu_ps(y, vy0123);
     _mm_storeu_ps(y + 4, vy4567);
     y += 8;
diff --git a/src/f32-vbinary/gen/vminc-wasm-x2.c b/src/f32-vbinary/gen/vminc-wasm-x2.c
index 35ded66..d50e60a 100644
--- a/src/f32-vbinary/gen/vminc-wasm-x2.c
+++ b/src/f32-vbinary/gen/vminc-wasm-x2.c
@@ -35,6 +35,7 @@
     float vy1 = __builtin_wasm_min_f32(va1, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y += 2;
diff --git a/src/f32-vbinary/gen/vminc-wasm-x4.c b/src/f32-vbinary/gen/vminc-wasm-x4.c
index 3a24bb5..d8ac0ad 100644
--- a/src/f32-vbinary/gen/vminc-wasm-x4.c
+++ b/src/f32-vbinary/gen/vminc-wasm-x4.c
@@ -39,6 +39,7 @@
     float vy3 = __builtin_wasm_min_f32(va3, vb);
 
 
+
     y[0] = vy0;
     y[1] = vy1;
     y[2] = vy2;
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx-x16.c b/src/f32-vbinary/gen/vmul-minmax-avx-x16.c
index 01660f8..7e2590d 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_mul_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx-x8.c b/src/f32-vbinary/gen/vmul-minmax-avx-x8.c
index 0c2cc91..ba42e00 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
index 209ebd1..de64190 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
index 3a047dc..1ccf9fb 100644
--- a/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmul-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-neon-x4.c b/src/f32-vbinary/gen/vmul-minmax-neon-x4.c
index 869794e..818d3f5 100644
--- a/src/f32-vbinary/gen/vmul-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-neon-x8.c b/src/f32-vbinary/gen/vmul-minmax-neon-x8.c
index 65af2bc..73de3bf 100644
--- a/src/f32-vbinary/gen/vmul-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
     float32x4_t vy4567 = vmulq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c b/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
index 2bab626..860bfa4 100644
--- a/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c b/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
index 555495e..a0e0e0f 100644
--- a/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_mul_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c b/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
index 720ff1a..191611e 100644
--- a/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmul-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 * vb0;
     float vy1 = va1 * vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c b/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
index 1c48260..1d2e783 100644
--- a/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 * vb2;
     float vy3 = va3 * vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmul-minmax-sse-x4.c b/src/f32-vbinary/gen/vmul-minmax-sse-x4.c
index 6b1dca7..0f0dd69 100644
--- a/src/f32-vbinary/gen/vmul-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_mul_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmul-minmax-sse-x8.c b/src/f32-vbinary/gen/vmul-minmax-sse-x8.c
index dbbdfbd..6099c87 100644
--- a/src/f32-vbinary/gen/vmul-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vmul-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_mul_ps(va0123, vb0123);
     __m128 vy4567 = _mm_mul_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c b/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
index 85e1075..1891226 100644
--- a/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmul-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 * vb0;
     float vy1 = va1 * vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c b/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
index f71344c..0731aa7 100644
--- a/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmul-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 * vb2;
     float vy3 = va3 * vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c b/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
index a054201..29cc7b4 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_mul_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c b/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
index 07692ff..c32e846 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
index 7c91298..708e1e3 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
index 0f77c38..dca652f 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_mul_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c b/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
index 01121e7..93de31d 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vmulq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c b/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
index 6608408..8693b14 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vmulq_f32(va0123, vb);
     float32x4_t vy4567 = vmulq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
index ea190b3..94c46bc 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
index 15c0f2b..f58446b 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_mul_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_mul_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
index 31dc83f..fe53df5 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 * vb;
     float vy1 = va1 * vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
index dfef131..24e4bcf 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 * vb;
     float vy3 = va3 * vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c b/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
index 995cf45..294bc58 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_mul_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c b/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
index dbc41a9..de42813 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_mul_ps(va0123, vb);
     __m128 vy4567 = _mm_mul_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
index f76e59f..4d8d071 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 * vb;
     float vy1 = va1 * vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
index 6f8be74..5281c86 100644
--- a/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vmulc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 * vb;
     float vy3 = va3 * vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c b/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
index 3be3723..47c7e79 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_div_ps(vb, va01234567);
     __m256 vy89ABCDEF = _mm256_div_ps(vb, va89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
index c7ae348..3e18136 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_div_ps(vb, va01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
index 5216232..1a9520a 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_div_ps(vb, va0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
index c5eb881..65acd81 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_div_ps(vb, va0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_div_ps(vb, vaGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
index f0cb985..7ffa485 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vdivq_f32(vb, va0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
index 90dc55e..bb5e055 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vdivq_f32(vb, va0123);
     float32x4_t vy4567 = vdivq_f32(vb, va4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
index 257663b..2b939f2 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_div_f32(vb, va0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
index fbfab1e..1f675a5 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_div_f32(vb, va0123);
     psimd_f32 vy4567 = psimd_div_f32(vb, va4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
index 0c53873..f3e82c6 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb / va0;
     float vy1 = vb / va1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
index e27237f..8cc8737 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb / va2;
     float vy3 = vb / va3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
index 3db24ff..dfb4490 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_div_ps(vb, va0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c b/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
index ae22169..7e72a02 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_div_ps(vb, va0123);
     __m128 vy4567 = _mm_div_ps(vb, va4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
index 6d2dc38..a9f70f6 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb / va0;
     float vy1 = vb / va1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
index e19c0e8..5b689e3 100644
--- a/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vrdivc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb / va2;
     float vy3 = vb / va3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c b/src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c
new file mode 100644
index 0000000..426991a
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx-x16.c
@@ -0,0 +1,83 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vrsqrdiffc_ukernel__avx_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
+    a += 16;
+
+    __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
+    __m256 vy89ABCDEF = _mm256_sub_ps(vb, va89ABCDEF);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vy89ABCDEF);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    _mm256_storeu_ps(y + 8, vy89ABCDEF);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
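
A note on the masked tail in the AVX kernel above: the load address (uintptr_t) &mask_table[7] - n starts n bytes (i.e. n / 4 int32 entries) before the first zero entry of the table, so exactly the leading n / 4 lanes of vmask come out as all-ones and the rest as zero. The following standalone sketch (an illustration, not part of the commit) prints the lane pattern for every tail length:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Same table as in the kernel: 7 all-ones entries followed by 7 zeros.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

int main(void) {
  // n is the remaining byte count; 1..7 floats => 4..28 bytes.
  for (size_t elems = 1; elems <= 7; elems++) {
    const size_t n = elems * sizeof(float);
    const int32_t* mask = (const int32_t*) ((uintptr_t) &mask_table[7] - n);
    printf("%zu float(s) left:", elems);
    for (size_t lane = 0; lane < 8; lane++) {
      printf(" %c", mask[lane] != 0 ? '1' : '0');  // 1 = lane participates in the maskload
    }
    printf("\n");
  }
  return 0;
}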
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c
new file mode 100644
index 0000000..19d7d6d
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx-x8.c
@@ -0,0 +1,79 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vrsqrdiffc_ukernel__avx_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    y += 8;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(vb, va);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
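
Note that the x8 variant above contains two loops with the identical bound n >= 8 * sizeof(float); the second can never execute. Presumably the template unconditionally emits a single-vector loop after the batch-tile loop, which becomes dead (but harmless) code whenever the batch tile equals the vector width, as here. The same pattern recurs in the other tile-equals-width kernels in this commit (avx512f-x16, neon-x4, psimd-x4, sse-x4).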
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c
new file mode 100644
index 0000000..60c84d9
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x16.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__avx512f_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    y += 16;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
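
The AVX512F tails build their mask arithmetically instead of from a table: after n >>= 2 converts the remaining byte count to an element count, (UINT32_C(1) << n) - 1 has one set bit per valid lane, which _cvtu32_mask16 moves into a __mmask16. A minimal sketch of the resulting bit patterns, without intrinsics:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  // elems is the tail element count, 1..15 for a 16-wide kernel.
  for (uint32_t elems = 1; elems <= 15; elems++) {
    const uint16_t vmask = (uint16_t) ((UINT32_C(1) << elems) - UINT32_C(1));
    printf("%2u float(s) left -> lane mask 0x%04x\n", elems, vmask);
  }
  return 0;
}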
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c
new file mode 100644
index 0000000..70f96f1
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-avx512f-x32.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__avx512f_x32(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    const __m512 vaGHIJKLMNOPQRSTUV = _mm512_loadu_ps(a + 16);
+    a += 32;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
+    __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vb, vaGHIJKLMNOPQRSTUV);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vyGHIJKLMNOPQRSTUV);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(vb, va);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c
new file mode 100644
index 0000000..2c87ff5
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-neon-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__neon_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c
new file mode 100644
index 0000000..43304d6
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-neon-x8.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__neon_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t va4567 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    float32x4_t vy4567 = vsubq_f32(vb, va4567);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vy4567 = vmulq_f32(vy4567, vy4567);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(vb, va0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c
new file mode 100644
index 0000000..50eab11
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x4.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__psimd_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+
+
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c
new file mode 100644
index 0000000..837f30a
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-psimd-x8.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__psimd_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 va4567 = psimd_load_f32(a + 4);
+    a += 8;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    psimd_f32 vy4567 = psimd_sub_f32(vb, va4567);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    vy4567 = psimd_mul_f32(vy4567, vy4567);
+
+
+    psimd_store_f32(y, vy0123);
+    psimd_store_f32(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c
new file mode 100644
index 0000000..e26adbe
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__scalar_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
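
The scalar kernel above is the reference semantics for the whole RSQRDIFFC family: y[i] = (b - a[i])^2 with b broadcast from a single element. Since squaring discards the sign of the subtraction, this is numerically identical to the non-reversed SQRDIFFC op, (a[i] - b)^2; the reversed variant presumably falls out of the same template machinery that generates genuinely order-sensitive reversed ops such as vrsubc and vrdivc. A self-contained usage sketch (vrsqrdiffc_ref is a local stand-in, not the library entry point):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

// Reference semantics, mirroring the scalar x1 kernel above:
// y[i] = (b - a[i])^2 with b broadcast; n is a byte count.
static void vrsqrdiffc_ref(size_t n, const float* a, const float* b, float* y) {
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  const float vb = *b;
  for (; n >= sizeof(float); n -= sizeof(float)) {
    const float va = *a++;
    const float vy = vb - va;
    *y++ = vy * vy;
  }
}

int main(void) {
  const float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  const float b = 2.5f;
  float y[4];
  vrsqrdiffc_ref(sizeof(a), a, &b, y);
  for (int i = 0; i < 4; i++) {
    printf("y[%d] = %.2f\n", i, y[i]);  // 2.25, 0.25, 0.25, 2.25
  }
  return 0;
}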
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c
new file mode 100644
index 0000000..5363ed9
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__scalar_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c
new file mode 100644
index 0000000..ad61bc6
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-scalar-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__scalar_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+    float vy2 = vb - va2;
+    float vy3 = vb - va3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = vb - va;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c
new file mode 100644
index 0000000..bae4f27
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-sse-x4.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__sse_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+
+
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
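
The SSE kernels have no masked store, so the tail above peels the write into a 2-float chunk (_mm_storel_pi, then _mm_movehl_ps to shift the upper half of the register down) and a final 1-float chunk (_mm_store_ss), keyed off the bits of the remaining byte count. The same control flow modeled with a plain array, as an illustration only:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

// v holds the 4 computed lanes; n is the remaining byte count (1..3 floats).
static void store_tail(const float v[4], size_t n, float* y) {
  size_t lane = 0;
  if (n & (2 * sizeof(float))) {  // 2 or 3 floats left: store the low pair, "shift" high down
    memcpy(y, &v[lane], 2 * sizeof(float));
    y += 2;
    lane += 2;
  }
  if (n & (1 * sizeof(float))) {  // odd count: store one remaining lane
    *y = v[lane];
  }
}

int main(void) {
  const float v[4] = {10.0f, 20.0f, 30.0f, 40.0f};
  float y[3] = {0};
  store_tail(v, 3 * sizeof(float), y);  // 3 floats: writes 10, 20, 30
  printf("%g %g %g\n", y[0], y[1], y[2]);
  return 0;
}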
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c b/src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c
new file mode 100644
index 0000000..9127dd5
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-sse-x8.c
@@ -0,0 +1,70 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__sse_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 va4567 = _mm_loadu_ps(a + 4);
+    a += 8;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    __m128 vy4567 = _mm_sub_ps(vb, va4567);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    vy4567 = _mm_mul_ps(vy4567, vy4567);
+
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(vb, va0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c
new file mode 100644
index 0000000..b439180
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__wasm_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
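
As the Template line in the header shows, the wasm variants of vrsqrdiffc are generated from the same vopc-scalar.c.in template as the scalar kernels; with no min/max clamping in this op, their bodies are identical to the scalar ones and only the function names differ. (The minmax wasm kernels elsewhere in this diff are where the templates diverge, substituting __builtin_wasm_max_f32 and __builtin_wasm_min_f32 for the math_max_f32/math_min_f32 helpers.)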
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c
new file mode 100644
index 0000000..62a4e1f
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__wasm_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = vb - va;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c
new file mode 100644
index 0000000..dc6d893
--- /dev/null
+++ b/src/f32-vbinary/gen/vrsqrdiffc-wasm-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vrsqrdiffc_ukernel__wasm_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = vb - va0;
+    float vy1 = vb - va1;
+    float vy2 = vb - va2;
+    float vy3 = vb - va3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = vb - va;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c b/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
index 5789c9c..4e73860 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
     __m256 vy89ABCDEF = _mm256_sub_ps(vb, va89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
index 067ea70..7b91d98 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_sub_ps(vb, va01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
index ce4f27b..0b9775d 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
index a160607..b4c7d05 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(vb, va0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vb, vaGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
index 5940782..9f3f79f 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vsubq_f32(vb, va0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
index eaae8da..e68b6cb 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vsubq_f32(vb, va0123);
     float32x4_t vy4567 = vsubq_f32(vb, va4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
index b59870d..febe35c 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
index f67b850..f57dbf5 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_sub_f32(vb, va0123);
     psimd_f32 vy4567 = psimd_sub_f32(vb, va4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
index e06a755..a6189df 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb - va0;
     float vy1 = vb - va1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
index 6a999c1..64afb58 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb - va2;
     float vy3 = vb - va3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
index 7d03760..4647adb 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_sub_ps(vb, va0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c b/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
index bdafcf9..853faa6 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_sub_ps(vb, va0123);
     __m128 vy4567 = _mm_sub_ps(vb, va4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
index e58d2f1..c66ae81 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = vb - va0;
     float vy1 = vb - va1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
index c69c285..5b2dfec 100644
--- a/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vrsubc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = vb - va2;
     float vy3 = vb - va3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx-x16.c b/src/f32-vbinary/gen/vsqrdiff-avx-x16.c
new file mode 100644
index 0000000..d29cfec
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx-x16.c
@@ -0,0 +1,90 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiff_ukernel__avx_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
+    a += 16;
+
+    const __m256 vb01234567 = _mm256_loadu_ps(b);
+    const __m256 vb89ABCDEF = _mm256_loadu_ps(b + 8);
+    b += 16;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
+    __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb89ABCDEF);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vy89ABCDEF);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    _mm256_storeu_ps(y + 8, vy89ABCDEF);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    const __m256 vb = _mm256_loadu_ps(b);
+    b += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+    const __m256 vb = _mm256_maskload_ps(b, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
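
The two-input vsqrdiff kernels, of which the AVX x16 version above is the first, consume both arrays elementwise rather than broadcasting b. Their shared semantics as a plain-C sketch (vsqrdiff_ref is a hypothetical helper, not the library API):

#include <stddef.h>
#include <stdio.h>

// y[i] = (a[i] - b[i])^2, computed as a subtract followed by a self-multiply,
// exactly one lane of what the SIMD bodies above do; n is a byte count.
static void vsqrdiff_ref(size_t n, const float* a, const float* b, float* y) {
  for (size_t i = 0; i < n / sizeof(float); i++) {
    const float d = a[i] - b[i];
    y[i] = d * d;
  }
}

int main(void) {
  const float a[3] = {1.0f, 5.0f, -2.0f};
  const float b[3] = {4.0f, 5.0f, 2.0f};
  float y[3];
  vsqrdiff_ref(sizeof(a), a, b, y);
  printf("%g %g %g\n", y[0], y[1], y[2]);  // 9 0 16
  return 0;
}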
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx-x8.c b/src/f32-vbinary/gen/vsqrdiff-avx-x8.c
new file mode 100644
index 0000000..c2deaf0
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx-x8.c
@@ -0,0 +1,85 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiff_ukernel__avx_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    a += 8;
+
+    const __m256 vb01234567 = _mm256_loadu_ps(b);
+    b += 8;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    y += 8;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    const __m256 vb = _mm256_loadu_ps(b);
+    b += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+    const __m256 vb = _mm256_maskload_ps(b, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c b/src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c
new file mode 100644
index 0000000..93b109f
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx512f-x16.c
@@ -0,0 +1,71 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__avx512f_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    a += 16;
+
+    const __m512 vb0123456789ABCDEF = _mm512_loadu_ps(b);
+    b += 16;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    y += 16;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    const __m512 vb = _mm512_loadu_ps(b);
+    b += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c b/src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c
new file mode 100644
index 0000000..c232e68
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-avx512f-x32.c
@@ -0,0 +1,76 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__avx512f_x32(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    const __m512 vaGHIJKLMNOPQRSTUV = _mm512_loadu_ps(a + 16);
+    a += 32;
+
+    const __m512 vb0123456789ABCDEF = _mm512_loadu_ps(b);
+    const __m512 vbGHIJKLMNOPQRSTUV = _mm512_loadu_ps(b + 16);
+    b += 32;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
+    __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vyGHIJKLMNOPQRSTUV);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    const __m512 vb = _mm512_loadu_ps(b);
+    b += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-neon-x4.c b/src/f32-vbinary/gen/vsqrdiff-neon-x4.c
new file mode 100644
index 0000000..9f28346
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-neon-x4.c
@@ -0,0 +1,64 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__neon_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+    const float32x4_t vb0123 = vld1q_f32(b);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-neon-x8.c b/src/f32-vbinary/gen/vsqrdiff-neon-x8.c
new file mode 100644
index 0000000..bda32d1
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-neon-x8.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__neon_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+    const float32x4_t va4567 = vld1q_f32(a); a += 4;
+    const float32x4_t vb4567 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    float32x4_t vy4567 = vsubq_f32(va4567, vb4567);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vy4567 = vmulq_f32(vy4567, vy4567);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+    const float32x4_t vb0123 = vld1q_f32(b);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-psimd-x4.c b/src/f32-vbinary/gen/vsqrdiff-psimd-x4.c
new file mode 100644
index 0000000..be4f52d
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-psimd-x4.c
@@ -0,0 +1,71 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__psimd_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    b += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+
+
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    b += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-psimd-x8.c b/src/f32-vbinary/gen/vsqrdiff-psimd-x8.c
new file mode 100644
index 0000000..7910349
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-psimd-x8.c
@@ -0,0 +1,76 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__psimd_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 va4567 = psimd_load_f32(a + 4);
+    a += 8;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    const psimd_f32 vb4567 = psimd_load_f32(b + 4);
+    b += 8;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    psimd_f32 vy4567 = psimd_sub_f32(va4567, vb4567);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    vy4567 = psimd_mul_f32(vy4567, vy4567);
+
+
+    psimd_store_f32(y, vy0123);
+    psimd_store_f32(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+    b += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 vb0123 = psimd_load_f32(b);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-scalar-x1.c b/src/f32-vbinary/gen/vsqrdiff-scalar-x1.c
new file mode 100644
index 0000000..9f8ff4e
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-scalar-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__scalar_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    const float vb = *b++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
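
All vsqrdiff kernels share one contract: n is a byte count that must be a nonzero multiple of sizeof(float), and the output is y[i] = (a[i] - b[i])^2. A minimal direct call to the scalar kernel, as a sketch only — applications normally reach microkernels through the XNNPACK operator API, and the params header location is an assumption here:

#include <stdio.h>

#include <xnnpack/params.h>   // union xnn_f32_default_params (assumed header)
#include <xnnpack/vbinary.h>  // microkernel declaration

int main(void) {
  const float a[5] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
  const float b[5] = {0.5f, 2.0f, 1.0f, 6.0f, 5.0f};
  float y[5];
  union xnn_f32_default_params params;  // never read by *_default_params kernels
  xnn_f32_vsqrdiff_ukernel__scalar_x1(5 * sizeof(float), a, b, y, &params);
  for (int i = 0; i < 5; i++) {
    printf("%.2f\n", y[i]);  // 0.25 0.00 4.00 4.00 0.00
  }
  return 0;
}
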
diff --git a/src/f32-vbinary/gen/vsqrdiff-scalar-x2.c b/src/f32-vbinary/gen/vsqrdiff-scalar-x2.c
new file mode 100644
index 0000000..ab47261
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-scalar-x2.c
@@ -0,0 +1,55 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__scalar_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    b += 2;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    const float vb = *b;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-scalar-x4.c b/src/f32-vbinary/gen/vsqrdiff-scalar-x4.c
new file mode 100644
index 0000000..e850af8
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-scalar-x4.c
@@ -0,0 +1,68 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__scalar_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    const float vb2 = b[2];
+    const float vb3 = b[3];
+    b += 4;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+    float vy2 = va2 - vb2;
+    float vy3 = va3 - vb3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      const float vb = *b++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-sse-x4.c b/src/f32-vbinary/gen/vsqrdiff-sse-x4.c
new file mode 100644
index 0000000..5904bcb
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-sse-x4.c
@@ -0,0 +1,72 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__sse_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    b += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+
+
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    b += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 vb0123 = _mm_loadu_ps(b);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
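
The SSE tail follows the same lane-walking pattern with x86 idioms: _mm_storel_pi writes the low two lanes, _mm_movehl_ps shifts the high pair down, and _mm_store_ss writes a single lane. In isolation (hypothetical helper name):

#include <xmmintrin.h>
#include <stddef.h>

// Store the first r lanes (1 <= r <= 3) of v, as in the kernel tail above.
static void store_tail_sse(float* y, __m128 v, size_t r) {
  if (r & 2) {
    _mm_storel_pi((__m64*) y, v);  // lanes 0-1
    v = _mm_movehl_ps(v, v);       // lanes 2-3 -> lanes 0-1
    y += 2;
  }
  if (r & 1) {
    _mm_store_ss(y, v);            // single lane
  }
}
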
diff --git a/src/f32-vbinary/gen/vsqrdiff-sse-x8.c b/src/f32-vbinary/gen/vsqrdiff-sse-x8.c
new file mode 100644
index 0000000..ca6ab69
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-sse-x8.c
@@ -0,0 +1,77 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__sse_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 va4567 = _mm_loadu_ps(a + 4);
+    a += 8;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    const __m128 vb4567 = _mm_loadu_ps(b + 4);
+    b += 8;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    __m128 vy4567 = _mm_sub_ps(va4567, vb4567);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    vy4567 = _mm_mul_ps(vy4567, vy4567);
+
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    const __m128 vb0123 = _mm_loadu_ps(b);
+    b += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 vb0123 = _mm_loadu_ps(b);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-wasm-x1.c b/src/f32-vbinary/gen/vsqrdiff-wasm-x1.c
new file mode 100644
index 0000000..7f0f293
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-wasm-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__wasm_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    const float vb = *b++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-wasm-x2.c b/src/f32-vbinary/gen/vsqrdiff-wasm-x2.c
new file mode 100644
index 0000000..d5103ec
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-wasm-x2.c
@@ -0,0 +1,55 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__wasm_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    b += 2;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    const float vb = *b;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiff-wasm-x4.c b/src/f32-vbinary/gen/vsqrdiff-wasm-x4.c
new file mode 100644
index 0000000..a17b560
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiff-wasm-x4.c
@@ -0,0 +1,68 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vop-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiff_ukernel__wasm_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    const float vb0 = b[0];
+    const float vb1 = b[1];
+    const float vb2 = b[2];
+    const float vb3 = b[3];
+    b += 4;
+
+    float vy0 = va0 - vb0;
+    float vy1 = va1 - vb1;
+    float vy2 = va2 - vb2;
+    float vy3 = va3 - vb3;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      const float vb = *b++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx-x16.c b/src/f32-vbinary/gen/vsqrdiffc-avx-x16.c
new file mode 100644
index 0000000..636766f
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx-x16.c
@@ -0,0 +1,83 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiffc_ukernel__avx_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
+    a += 16;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
+    __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vy89ABCDEF);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    _mm256_storeu_ps(y + 8, vy89ABCDEF);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
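
The vsqrdiffc variants broadcast a single scalar, computing y[i] = (a[i] - b[0])^2. Their AVX tail derives a per-lane mask by sliding a window over mask_table: with r floats remaining, n equals 4*r bytes, so (uintptr_t) &mask_table[7] - n points at mask_table[7 - r], and the 8 int32 lanes loaded from there are exactly r copies of -1 followed by zeros. A worked trace for r = 3:

// mask_table index:  0   1   2   3   4   5   6   7   8   9  10  11  12  13
// mask_table value: -1  -1  -1  -1  -1  -1  -1   0   0   0   0   0   0   0
//
// r = 3 floats left  =>  n = 12 bytes
// &mask_table[7] - 12 bytes == &mask_table[4]
// loaded lanes: {-1, -1, -1, 0, 0, 0, 0, 0}
// _mm256_maskload_ps reads only a[0..2], zero-filling lanes 3-7, and the
// lane-by-lane stores below it write back just those 3 results.
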
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx-x8.c b/src/f32-vbinary/gen/vsqrdiffc-avx-x8.c
new file mode 100644
index 0000000..2e9bdab
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx-x8.c
@@ -0,0 +1,79 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vsqrdiffc_ukernel__avx_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m256 vb = _mm256_broadcast_ss(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va01234567 = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
+
+    vy01234567 = _mm256_mul_ps(vy01234567, vy01234567);
+
+
+    _mm256_storeu_ps(y, vy01234567);
+    y += 8;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m256 va = _mm256_loadu_ps(a);
+    a += 8;
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 va = _mm256_maskload_ps(a, vmask);
+
+    __m256 vy = _mm256_sub_ps(va, vb);
+    vy = _mm256_mul_ps(vy, vy);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c
new file mode 100644
index 0000000..f43d9d8
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x16.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__avx512f_x16(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    y += 16;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
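
AVX-512 replaces the table trick with a mask register: n >>= 2 converts the byte count to an element count, and (1 << n) - 1 sets that many low bits of a __mmask16, which gates both the load and the store. For example, with 5 floats remaining:

// n = 5 * sizeof(float) = 20 bytes
// n >>= 2                   ->  5 elements
// (UINT32_C(1) << 5) - 1    ->  0x001F  (low 5 bits set)
// _cvtu32_mask16(0x001F) enables lanes 0-4: _mm512_maskz_loadu_ps zeroes
// lanes 5-15, and _mm512_mask_storeu_ps never touches y[5..15].
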
diff --git a/src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c
new file mode 100644
index 0000000..487632f
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-avx512f-x32.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-avx512f.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__avx512f_x32(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m512 vb = _mm512_set1_ps(*b);
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    const __m512 va0123456789ABCDEF = _mm512_loadu_ps(a);
+    const __m512 vaGHIJKLMNOPQRSTUV = _mm512_loadu_ps(a + 16);
+    a += 32;
+
+    __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
+    __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vb);
+
+    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vy0123456789ABCDEF);
+    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vyGHIJKLMNOPQRSTUV);
+
+
+    _mm512_storeu_ps(y, vy0123456789ABCDEF);
+    _mm512_storeu_ps(y + 16, vyGHIJKLMNOPQRSTUV);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    const __m512 va = _mm512_loadu_ps(a);
+    a += 16;
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+
+    __m512 vy = _mm512_sub_ps(va, vb);
+    vy = _mm512_mul_ps(vy, vy);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-neon-x4.c b/src/f32-vbinary/gen/vsqrdiffc-neon-x4.c
new file mode 100644
index 0000000..aa1c257
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-neon-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__neon_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-neon-x8.c b/src/f32-vbinary/gen/vsqrdiffc-neon-x8.c
new file mode 100644
index 0000000..da2f1dc
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-neon-x8.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__neon_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float32x4_t vb = vld1q_dup_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+    const float32x4_t va4567 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    float32x4_t vy4567 = vsubq_f32(va4567, vb);
+
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vy4567 = vmulq_f32(vy4567, vy4567);
+
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float32x4_t va0123 = vld1q_f32(a); a += 4;
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+    vst1q_f32(y, vy0123); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float32x4_t va0123 = vld1q_f32(a);
+
+    float32x4_t vy0123 = vsubq_f32(va0123, vb);
+    vy0123 = vmulq_f32(vy0123, vy0123);
+
+    float32x2_t vy01 = vget_low_f32(vy0123);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy01); y += 2;
+      vy01 = vget_high_f32(vy0123);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy01, 0);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c b/src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c
new file mode 100644
index 0000000..8a2a174
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-psimd-x4.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__psimd_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+
+
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c b/src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c
new file mode 100644
index 0000000..d47eeea
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-psimd-x8.c
@@ -0,0 +1,69 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-psimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <psimd.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__psimd_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const psimd_f32 vb = psimd_load_splat_f32(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    const psimd_f32 va4567 = psimd_load_f32(a + 4);
+    a += 8;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    psimd_f32 vy4567 = psimd_sub_f32(va4567, vb);
+
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    vy4567 = psimd_mul_f32(vy4567, vy4567);
+
+
+    psimd_store_f32(y, vy0123);
+    psimd_store_f32(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+    a += 4;
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    psimd_store_f32(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const psimd_f32 va0123 = psimd_load_f32(a);
+
+    psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
+    vy0123 = psimd_mul_f32(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      psimd_store2_f32(y, vy0123);
+      vy0123 = psimd_concat_hi_f32(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      psimd_store1_f32(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c b/src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c
new file mode 100644
index 0000000..7a88d0a
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-scalar-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__scalar_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c b/src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c
new file mode 100644
index 0000000..63771e5
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-scalar-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__scalar_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c b/src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c
new file mode 100644
index 0000000..9fcc8dc
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-scalar-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__scalar_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+    float vy2 = va2 - vb;
+    float vy3 = va3 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-sse-x4.c b/src/f32-vbinary/gen/vsqrdiffc-sse-x4.c
new file mode 100644
index 0000000..24bdde6
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-sse-x4.c
@@ -0,0 +1,66 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__sse_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+
+
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-sse-x8.c b/src/f32-vbinary/gen/vsqrdiffc-sse-x8.c
new file mode 100644
index 0000000..43c1eb0
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-sse-x8.c
@@ -0,0 +1,70 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-sse.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__sse_x8(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const __m128 vb = _mm_load1_ps(b);
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    const __m128 va4567 = _mm_loadu_ps(a + 4);
+    a += 8;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    __m128 vy4567 = _mm_sub_ps(va4567, vb);
+
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    vy4567 = _mm_mul_ps(vy4567, vy4567);
+
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+    a += 4;
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    _mm_storeu_ps(y, vy0123);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const __m128 va0123 = _mm_loadu_ps(a);
+
+    __m128 vy0123 = _mm_sub_ps(va0123, vb);
+    vy0123 = _mm_mul_ps(vy0123, vy0123);
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy0123);
+      vy0123 = _mm_movehl_ps(vy0123, vy0123);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy0123);
+    }
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c b/src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c
new file mode 100644
index 0000000..abaa0f8
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-wasm-x1.c
@@ -0,0 +1,35 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__wasm_x1(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= sizeof(float); n -= sizeof(float)) {
+    const float va = *a++;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y++ = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c b/src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c
new file mode 100644
index 0000000..415d9be
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-wasm-x2.c
@@ -0,0 +1,51 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__wasm_x2(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    a += 2;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const float va = *a;
+    float vy = va - vb;
+    vy = vy * vy;
+    *y = vy;
+  }
+}
diff --git a/src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c b/src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c
new file mode 100644
index 0000000..07fdf22
--- /dev/null
+++ b/src/f32-vbinary/gen/vsqrdiffc-wasm-x4.c
@@ -0,0 +1,62 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-vbinary/vopc-scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/math.h>
+#include <xnnpack/vbinary.h>
+
+
+void xnn_f32_vsqrdiffc_ukernel__wasm_x4(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+
+  const float vb = *b;
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    const float va0 = a[0];
+    const float va1 = a[1];
+    const float va2 = a[2];
+    const float va3 = a[3];
+    a += 4;
+
+    float vy0 = va0 - vb;
+    float vy1 = va1 - vb;
+    float vy2 = va2 - vb;
+    float vy3 = va3 - vb;
+
+    vy0 = vy0 * vy0;
+    vy1 = vy1 * vy1;
+    vy2 = vy2 * vy2;
+    vy3 = vy3 * vy3;
+
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const float va = *a++;
+      float vy = va - vb;
+      vy = vy * vy;
+      *y++ = vy;
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx-x16.c b/src/f32-vbinary/gen/vsub-minmax-avx-x16.c
index ff72f26..fd9aee3 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx-x16.c
@@ -42,6 +42,7 @@
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
     __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb89ABCDEF);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx-x8.c b/src/f32-vbinary/gen/vsub-minmax-avx-x8.c
index 7504d94..515afd7 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx-x8.c
@@ -39,6 +39,7 @@
 
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb01234567);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
index 2bf7b7f..36d84ec 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx512f-x16.c
@@ -38,6 +38,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
index 8311cb7..ed289c6 100644
--- a/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
@@ -41,6 +41,7 @@
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb0123456789ABCDEF);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vbGHIJKLMNOPQRSTUV);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-neon-x4.c b/src/f32-vbinary/gen/vsub-minmax-neon-x4.c
index 4f9b839..62a9564 100644
--- a/src/f32-vbinary/gen/vsub-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-neon-x8.c b/src/f32-vbinary/gen/vsub-minmax-neon-x8.c
index 3f3ca44..a5b4ada 100644
--- a/src/f32-vbinary/gen/vsub-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-neon-x8.c
@@ -37,6 +37,7 @@
     float32x4_t vy0123 = vsubq_f32(va0123, vb0123);
     float32x4_t vy4567 = vsubq_f32(va4567, vb4567);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c b/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
index 445d44a..1065e95 100644
--- a/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-psimd-x4.c
@@ -37,6 +37,7 @@
 
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c b/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
index 07e8171..f1931ec 100644
--- a/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-psimd-x8.c
@@ -40,6 +40,7 @@
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb0123);
     psimd_f32 vy4567 = psimd_sub_f32(va4567, vb4567);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c b/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
index 27c3d69..16cdb9d 100644
--- a/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vsub-minmax-scalar-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 - vb0;
     float vy1 = va1 - vb1;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c b/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
index 33c0b97..81e52d0 100644
--- a/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-scalar-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 - vb2;
     float vy3 = va3 - vb3;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsub-minmax-sse-x4.c b/src/f32-vbinary/gen/vsub-minmax-sse-x4.c
index f61af92..9509137 100644
--- a/src/f32-vbinary/gen/vsub-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-sse-x4.c
@@ -38,6 +38,7 @@
 
     __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsub-minmax-sse-x8.c b/src/f32-vbinary/gen/vsub-minmax-sse-x8.c
index 8995fca..83d9ed2 100644
--- a/src/f32-vbinary/gen/vsub-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vsub-minmax-sse-x8.c
@@ -41,6 +41,7 @@
     __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
     __m128 vy4567 = _mm_sub_ps(va4567, vb4567);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c b/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
index 6ac500c..eb66d63 100644
--- a/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vsub-minmax-wasm-x2.c
@@ -39,6 +39,7 @@
     float vy0 = va0 - vb0;
     float vy1 = va1 - vb1;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c b/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
index ddedd13..167c5d6 100644
--- a/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vsub-minmax-wasm-x4.c
@@ -45,6 +45,7 @@
     float vy2 = va2 - vb2;
     float vy3 = va3 - vb3;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c b/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
index dc876a7..7b5800c 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
@@ -39,6 +39,7 @@
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
     __m256 vy89ABCDEF = _mm256_sub_ps(va89ABCDEF, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c b/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
index 76e973a..30d34cf 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
@@ -37,6 +37,7 @@
 
     __m256 vy01234567 = _mm256_sub_ps(va01234567, vb);
 
+
     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
 
     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
index 003eef5..cc1553c 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
@@ -36,6 +36,7 @@
 
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
 
     vy0123456789ABCDEF = _mm512_min_ps(vy0123456789ABCDEF, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
index d7db19c..5aa88d5 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
@@ -38,6 +38,7 @@
     __m512 vy0123456789ABCDEF = _mm512_sub_ps(va0123456789ABCDEF, vb);
     __m512 vyGHIJKLMNOPQRSTUV = _mm512_sub_ps(vaGHIJKLMNOPQRSTUV, vb);
 
+
     vy0123456789ABCDEF = _mm512_max_ps(vy0123456789ABCDEF, vy_min);
     vyGHIJKLMNOPQRSTUV = _mm512_max_ps(vyGHIJKLMNOPQRSTUV, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c b/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
index 88afb06..0b44608 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
@@ -34,6 +34,7 @@
 
     float32x4_t vy0123 = vsubq_f32(va0123, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
 
     vy0123 = vminq_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c b/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
index 112e8f6..3c8f247 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
@@ -36,6 +36,7 @@
     float32x4_t vy0123 = vsubq_f32(va0123, vb);
     float32x4_t vy4567 = vsubq_f32(va4567, vb);
 
+
     vy0123 = vmaxq_f32(vy0123, vy_min);
     vy4567 = vmaxq_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c b/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
index 1816d8f..b583755 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-psimd-x4.c
@@ -35,6 +35,7 @@
 
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
 
     vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c b/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
index 014e32d..6633b80 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-psimd-x8.c
@@ -37,6 +37,7 @@
     psimd_f32 vy0123 = psimd_sub_f32(va0123, vb);
     psimd_f32 vy4567 = psimd_sub_f32(va4567, vb);
 
+
     vy0123 = psimd_max_f32(vy0123, vy_min);
     vy4567 = psimd_max_f32(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c b/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
index 393eeb1..16b8ca3 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-scalar-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 - vb;
     float vy1 = va1 - vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c b/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
index b8bde24..589ae49 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-scalar-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 - vb;
     float vy3 = va3 - vb;
 
+
     vy0 = math_max_f32(vy0, vy_min);
     vy1 = math_max_f32(vy1, vy_min);
     vy2 = math_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c b/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
index d57eeb6..62611f4 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-sse-x4.c
@@ -36,6 +36,7 @@
 
     __m128 vy0123 = _mm_sub_ps(va0123, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
 
     vy0123 = _mm_min_ps(vy0123, vy_max);
diff --git a/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c b/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
index 39f4da1..735102b 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-sse-x8.c
@@ -38,6 +38,7 @@
     __m128 vy0123 = _mm_sub_ps(va0123, vb);
     __m128 vy4567 = _mm_sub_ps(va4567, vb);
 
+
     vy0123 = _mm_max_ps(vy0123, vy_min);
     vy4567 = _mm_max_ps(vy4567, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c b/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
index c219eca..c7463d6 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-wasm-x2.c
@@ -36,6 +36,7 @@
     float vy0 = va0 - vb;
     float vy1 = va1 - vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
 
diff --git a/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c b/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
index 21c60b4..1970ca9 100644
--- a/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
+++ b/src/f32-vbinary/gen/vsubc-minmax-wasm-x4.c
@@ -40,6 +40,7 @@
     float vy2 = va2 - vb;
     float vy3 = va3 - vb;
 
+
     vy0 = __builtin_wasm_max_f32(vy0, vy_min);
     vy1 = __builtin_wasm_max_f32(vy1, vy_min);
     vy2 = __builtin_wasm_max_f32(vy2, vy_min);
diff --git a/src/f32-vbinary/vop-avx.c.in b/src/f32-vbinary/vop-avx.c.in
index 7863bec..b09f80f 100644
--- a/src/f32-vbinary/vop-avx.c.in
+++ b/src/f32-vbinary/vop-avx.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,7 @@
 $  "MIN": lambda x, y: "_mm256_min_ps(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "_mm256_mul_ps(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "_mm256_sub_ps(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "_mm256_sub_ps(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -56,6 +57,10 @@
     $for N in range(0, BATCH_TILE, 8):
       __m256 vy${ABC[N:N+8]} = ${_MM256_OP_PS("va" + ABC[N:N+8], "vb" + ABC[N:N+8])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 8):
+        vy${ABC[N:N+8]} = _mm256_mul_ps(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 8):
         vy${ABC[N:N+8]} = _mm256_max_ps(vy${ABC[N:N+8]}, vy_min);
@@ -77,6 +82,8 @@
       b += 8;
 
       __m256 vy = ${_MM256_OP_PS("va", "vb")};
+      $if OP == "SQRDIFF":
+        vy = _mm256_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm256_max_ps(vy, vy_min);
         vy = _mm256_min_ps(vy, vy_max);
@@ -92,6 +99,8 @@
     const __m256 vb = _mm256_maskload_ps(b, vmask);
 
     __m256 vy = ${_MM256_OP_PS("va", "vb")};
+    $if OP == "SQRDIFF":
+      vy = _mm256_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm256_max_ps(vy, vy_min);
       vy = _mm256_min_ps(vy, vy_max);
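
Note on the templates above and below: for OP == "SQRDIFF" every backend reuses its subtraction lambda and then squares the result, so each kernel computes y[i] = (a[i] - b[i])^2. A minimal scalar reference for the intended semantics (an illustrative sketch, not a kernel from this change):

    #include <stddef.h>

    // Reference semantics of the SQRDIFF microkernels: y[i] = (a[i] - b[i])^2.
    static void sqrdiff_reference(size_t n, const float* a, const float* b, float* y) {
      for (size_t i = 0; i < n; i++) {
        const float d = a[i] - b[i];
        y[i] = d * d;  // subtract, then square -- the same two steps the templates emit
      }
    }

Splitting the operation this way lets SQRDIFF share all of the operand-loading, clamping, and store code with SUB; only the one extra multiply per vector is new.
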
diff --git a/src/f32-vbinary/vop-avx512f.c.in b/src/f32-vbinary/vop-avx512f.c.in
index f36b303..cafb5ec 100644
--- a/src/f32-vbinary/vop-avx512f.c.in
+++ b/src/f32-vbinary/vop-avx512f.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 16 == 0
 $assert BATCH_TILE >= 16
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -24,6 +24,7 @@
 $  "MIN": lambda x, y: "_mm512_min_ps(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "_mm512_mul_ps(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -55,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 16):
       __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16], "vb" + ABC[N:N+16])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 16):
+        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vy${ABC[N:N+16]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 16):
         vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);
@@ -76,6 +81,8 @@
       b += 16;
 
       __m512 vy = ${_MM512_OP_PS("va", "vb")};
+      $if OP == "SQRDIFF":
+        vy = _mm512_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm512_max_ps(vy, vy_min);
         vy = _mm512_min_ps(vy, vy_max);
@@ -93,6 +100,8 @@
     const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
 
     __m512 vy = ${_MM512_OP_PS("va", "vb")};
+    $if OP == "SQRDIFF":
+      vy = _mm512_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm512_max_ps(vy, vy_min);
       vy = _mm512_min_ps(vy, vy_max);
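
For AVX512F, the _MM512_OP_PS lambda emits a plain _mm512_sub_ps and the new $if OP == "SQRDIFF" block appends a self-multiply, so the BATCH_TILE=16 main loop of a LINEAR-activation SQRDIFF kernel should expand to roughly the following (a sketch of the expected generated code, not copied from this change):

    #include <stddef.h>
    #include <immintrin.h>

    static void vsqrdiff_avx512f_x16_sketch(size_t n, const float* a, const float* b, float* y) {
      for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
        const __m512 va = _mm512_loadu_ps(a);  a += 16;
        const __m512 vb = _mm512_loadu_ps(b);  b += 16;
        __m512 vy = _mm512_sub_ps(va, vb);  // the SUB lambda, reused by SQRDIFF
        vy = _mm512_mul_ps(vy, vy);         // the squaring step inserted by this change
        _mm512_storeu_ps(y, vy);  y += 16;
      }
    }
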
diff --git a/src/f32-vbinary/vop-neon.c.in b/src/f32-vbinary/vop-neon.c.in
index f427fdb..4a20103 100644
--- a/src/f32-vbinary/vop-neon.c.in
+++ b/src/f32-vbinary/vop-neon.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "vminq_f32(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "vmulq_f32(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "vsubq_f32(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "vsubq_f32(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -48,6 +49,10 @@
     $for N in range(0, BATCH_TILE, 4):
       float32x4_t vy${ABC[N:N+4]} = ${VOPQ_F32("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = vmulq_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = vmaxq_f32(vy${ABC[N:N+4]}, vy_min);
@@ -64,6 +69,8 @@
       const float32x4_t vb0123 = vld1q_f32(b); b += 4;
 
       float32x4_t vy0123 = ${VOPQ_F32("va0123", "vb0123")};
+      $if OP == "SQRDIFF":
+        vy0123 = vmulq_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = vmaxq_f32(vy0123, vy_min);
         vy0123 = vminq_f32(vy0123, vy_max);
@@ -74,6 +81,8 @@
     const float32x4_t vb0123 = vld1q_f32(b);
 
     float32x4_t vy0123 = ${VOPQ_F32("va0123", "vb0123")};
+    $if OP == "SQRDIFF":
+      vy0123 = vmulq_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = vmaxq_f32(vy0123, vy_min);
       vy0123 = vminq_f32(vy0123, vy_max);
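
The NEON template places the squaring multiply between the subtract and the MINMAX clamp, so a MINMAX SQRDIFF kernel's vector body should expand along these lines (illustrative sketch; in the real kernels vy_min/vy_max are loaded from the xnn_f32_minmax_params argument):

    #include <stddef.h>
    #include <arm_neon.h>

    static void vsqrdiff_minmax_neon_x4_sketch(size_t n, const float* a, const float* b,
                                               float* y, float32x4_t vy_min, float32x4_t vy_max) {
      for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
        const float32x4_t va = vld1q_f32(a);  a += 4;
        const float32x4_t vb = vld1q_f32(b);  b += 4;
        float32x4_t vy = vsubq_f32(va, vb);
        vy = vmulq_f32(vy, vy);      // square the difference before clamping
        vy = vmaxq_f32(vy, vy_min);
        vy = vminq_f32(vy, vy_max);
        vst1q_f32(y, vy);  y += 4;
      }
    }
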
diff --git a/src/f32-vbinary/vop-psimd.c.in b/src/f32-vbinary/vop-psimd.c.in
index f1640f6..ffd03f2 100644
--- a/src/f32-vbinary/vop-psimd.c.in
+++ b/src/f32-vbinary/vop-psimd.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "psimd_min_f32(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "psimd_mul_f32(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "psimd_sub_f32(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "psimd_sub_f32(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -54,6 +55,10 @@
     $for N in range(0, BATCH_TILE, 4):
       psimd_f32 vy${ABC[N:N+4]} = ${PSIMD_OP_F32("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = psimd_mul_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = psimd_max_f32(vy${ABC[N:N+4]}, vy_min);
@@ -75,6 +80,8 @@
       b += 4;
 
       psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123", "vb0123")};
+      $if OP == "SQRDIFF":
+        vy0123 = psimd_mul_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = psimd_max_f32(vy0123, vy_min);
         vy0123 = psimd_min_f32(vy0123, vy_max);
@@ -86,6 +93,8 @@
     const psimd_f32 vb0123 = psimd_load_f32(b);
 
     psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123", "vb0123")};
+    $if OP == "SQRDIFF":
+      vy0123 = psimd_mul_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = psimd_max_f32(vy0123, vy_min);
       vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/vop-scalar.c.in b/src/f32-vbinary/vop-scalar.c.in
index dabe3a0..85696ac 100644
--- a/src/f32-vbinary/vop-scalar.c.in
+++ b/src/f32-vbinary/vop-scalar.c.in
@@ -5,7 +5,7 @@
 
 $assert BATCH_TILE >= 1
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -23,6 +23,7 @@
 $  "MIN": lambda x, y: "%s(%s, %s)" % (MIN_F32, x, y),
 $  "MUL": lambda x, y: "%s * %s" % (x, y),
 $  "SUB": lambda x, y: "%s - %s" % (x, y),
+$  "SQRDIFF": lambda x, y: "%s - %s" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -53,6 +54,10 @@
       $for N in range(BATCH_TILE):
         float vy${ABC[N]} = ${OP_FUNC("va" + ABC[N], "vb" + ABC[N])};
 
+      $if OP == "SQRDIFF":
+        $for N in range(BATCH_TILE):
+          vy${ABC[N]} = vy${ABC[N]} * vy${ABC[N]};
+
       $if ACTIVATION == "MINMAX":
         $for N in range(BATCH_TILE):
           vy${ABC[N]} = ${MAX_F32}(vy${ABC[N]}, vy_min);
@@ -70,6 +75,8 @@
           const float va = *a++;
           const float vb = *b++;
           float vy = ${OP_FUNC("va", "vb")};
+          $if OP == "SQRDIFF":
+            vy = vy * vy;
           $if ACTIVATION == "MINMAX":
             vy = ${MAX_F32}(vy, vy_min);
             vy = ${MIN_F32}(vy, vy_max);
@@ -80,6 +87,8 @@
         const float va = *a;
         const float vb = *b;
         float vy = ${OP_FUNC("va", "vb")};
+        $if OP == "SQRDIFF":
+          vy = vy * vy;
         $if ACTIVATION == "MINMAX":
           vy = ${MAX_F32}(vy, vy_min);
           vy = ${MIN_F32}(vy, vy_max);
@@ -90,6 +99,8 @@
       const float va = *a++;
       const float vb = *b++;
       float vy = ${OP_FUNC("va", "vb")};
+      $if OP == "SQRDIFF":
+        vy = vy * vy;
       $if ACTIVATION == "MINMAX":
         vy = ${MAX_F32}(vy, vy_min);
         vy = ${MIN_F32}(vy, vy_max);
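
In the scalar template the squaring is plain C, so at BATCH_TILE=2 the unrolled main loop of a LINEAR SQRDIFF kernel should come out approximately as follows (sketch only):

    #include <stddef.h>

    static void vsqrdiff_scalar_x2_sketch(size_t n, const float* a, const float* b, float* y) {
      for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
        const float va0 = a[0];
        const float va1 = a[1];
        a += 2;
        const float vb0 = b[0];
        const float vb1 = b[1];
        b += 2;

        float vy0 = va0 - vb0;
        float vy1 = va1 - vb1;

        vy0 = vy0 * vy0;  // squaring block emitted by the new $if OP == "SQRDIFF"
        vy1 = vy1 * vy1;

        y[0] = vy0;
        y[1] = vy1;
        y += 2;
      }
    }

One consequence worth noting: a squared difference is never negative, so in the MINMAX variants the vy_min clamp only changes the output when the requested minimum is positive.
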
diff --git a/src/f32-vbinary/vop-sse.c.in b/src/f32-vbinary/vop-sse.c.in
index 3825e8d..1565ce7 100644
--- a/src/f32-vbinary/vop-sse.c.in
+++ b/src/f32-vbinary/vop-sse.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -24,6 +24,7 @@
 $  "MIN": lambda x, y: "_mm_min_ps(%s, %s)" % (x, y),
 $  "MUL": lambda x, y: "_mm_mul_ps(%s, %s)" % (x, y),
 $  "SUB": lambda x, y: "_mm_sub_ps(%s, %s)" % (x, y),
+$  "SQRDIFF": lambda x, y: "_mm_sub_ps(%s, %s)" % (x, y),
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -55,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 4):
       __m128 vy${ABC[N:N+4]} = ${_MM_OP_PS("va" + ABC[N:N+4], "vb" + ABC[N:N+4])};
 
+    $if OP == "SQRDIFF":
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = _mm_max_ps(vy${ABC[N:N+4]}, vy_min);
@@ -76,6 +81,8 @@
       b += 4;
 
       __m128 vy0123 = ${_MM_OP_PS("va0123", "vb0123")};
+      $if OP == "SQRDIFF":
+        vy0123 = _mm_mul_ps(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = _mm_max_ps(vy0123, vy_min);
         vy0123 = _mm_min_ps(vy0123, vy_max);
@@ -87,6 +94,8 @@
     const __m128 vb0123 = _mm_loadu_ps(b);
 
     __m128 vy0123 = ${_MM_OP_PS("va0123", "vb0123")};
+    $if OP == "SQRDIFF":
+      vy0123 = _mm_mul_ps(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = _mm_max_ps(vy0123, vy_min);
       vy0123 = _mm_min_ps(vy0123, vy_max);
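
The SSE template's remainder path computes a full 4-lane vector and stores it piecewise. Following the tail-store pattern used by the other SSE vbinary kernels in this directory (an assumption here, since the generated tail is not shown in this hunk), the SQRDIFF remainder would look roughly like:

    #include <stddef.h>
    #include <xmmintrin.h>

    // Sketch of the SSE tail (fewer than 4 floats left): compute all four lanes,
    // then store 2, then 1, as the remaining byte count dictates.
    static void vsqrdiff_sse_tail_sketch(size_t n, const float* a, const float* b, float* y) {
      const __m128 va0123 = _mm_loadu_ps(a);
      const __m128 vb0123 = _mm_loadu_ps(b);
      __m128 vy0123 = _mm_sub_ps(va0123, vb0123);
      vy0123 = _mm_mul_ps(vy0123, vy0123);
      if (n & (2 * sizeof(float))) {
        _mm_storel_pi((__m64*) y, vy0123);       // store lanes 0-1
        vy0123 = _mm_movehl_ps(vy0123, vy0123);  // move lanes 2-3 down
        y += 2;
      }
      if (n & (1 * sizeof(float))) {
        _mm_store_ss(y, vy0123);                 // store one last lane
      }
    }
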
diff --git a/src/f32-vbinary/vopc-avx.c.in b/src/f32-vbinary/vopc-avx.c.in
index 380b1b8..badc839 100644
--- a/src/f32-vbinary/vopc-avx.c.in
+++ b/src/f32-vbinary/vopc-avx.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -27,6 +27,8 @@
 $  "MUL": lambda x: "_mm256_mul_ps(%s, vb)" % x,
 $  "SUB": lambda x: "_mm256_sub_ps(%s, vb)" % x,
 $  "RSUB": lambda x: "_mm256_sub_ps(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "_mm256_sub_ps(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "_mm256_sub_ps(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -54,6 +56,10 @@
     $for N in range(0, BATCH_TILE, 8):
       __m256 vy${ABC[N:N+8]} = ${_MM256_OP_PS("va" + ABC[N:N+8])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 8):
+        vy${ABC[N:N+8]} = _mm256_mul_ps(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 8):
         vy${ABC[N:N+8]} = _mm256_max_ps(vy${ABC[N:N+8]}, vy_min);
@@ -72,6 +78,8 @@
       a += 8;
 
       __m256 vy = ${_MM256_OP_PS("va")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy = _mm256_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm256_max_ps(vy, vy_min);
         vy = _mm256_min_ps(vy, vy_max);
@@ -86,6 +94,8 @@
     const __m256 va = _mm256_maskload_ps(a, vmask);
 
     __m256 vy = ${_MM256_OP_PS("va")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy = _mm256_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm256_max_ps(vy, vy_min);
       vy = _mm256_min_ps(vy, vy_max);
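
The vopc (constant-operand) templates add both SQRDIFF and RSQRDIFF, mirroring SUB/RSUB. Mathematically the two coincide: (b - a)^2 = (-(a - b))^2 = (a - b)^2, and because IEEE-754 negation is exact and both operations are correctly rounded, the results are bit-identical rather than merely close; the reversed variant keeps the vopc operator table symmetric with RSUB and RDIV. An illustrative check (not part of this change):

    // Holds whenever the difference is not NaN: squaring discards the sign,
    // and a - b rounds to the exact negation of b - a in IEEE-754 arithmetic.
    static int sqrdiff_orders_agree(float a, float b) {
      const float d0 = a - b;
      const float d1 = b - a;
      return d0 * d0 == d1 * d1;
    }
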
diff --git a/src/f32-vbinary/vopc-avx512f.c.in b/src/f32-vbinary/vopc-avx512f.c.in
index 6460ebe..0548296 100644
--- a/src/f32-vbinary/vopc-avx512f.c.in
+++ b/src/f32-vbinary/vopc-avx512f.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 16 == 0
 $assert BATCH_TILE >= 16
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -26,6 +26,8 @@
 $  "MUL": lambda x: "_mm512_mul_ps(%s, vb)" % x,
 $  "SUB": lambda x: "_mm512_sub_ps(%s, vb)" % x,
 $  "RSUB": lambda x: "_mm512_sub_ps(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "_mm512_sub_ps(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "_mm512_sub_ps(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -53,6 +55,10 @@
     $for N in range(0, BATCH_TILE, 16):
       __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 16):
+        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vy${ABC[N:N+16]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 16):
         vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);
@@ -71,6 +77,8 @@
       a += 16;
 
       __m512 vy = ${_MM512_OP_PS("va")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy = _mm512_mul_ps(vy, vy);
       $if ACTIVATION == "MINMAX":
         vy = _mm512_max_ps(vy, vy_min);
         vy = _mm512_min_ps(vy, vy_max);
@@ -87,6 +95,8 @@
     const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
 
     __m512 vy = ${_MM512_OP_PS("va")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy = _mm512_mul_ps(vy, vy);
     $if ACTIVATION == "MINMAX":
       vy = _mm512_max_ps(vy, vy_min);
       vy = _mm512_min_ps(vy, vy_max);
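
In the AVX512F vopc template the remainder is handled with a zero-masking load (_mm512_maskz_loadu_ps) rather than a scalar loop, and the new squaring step applies to the masked vector as well. A sketch of that tail, with the mask computed from the remaining element count (illustrative; the exact mask construction in the generated code may differ):

    #include <stddef.h>
    #include <stdint.h>
    #include <immintrin.h>

    // Process the final 1..15 floats of a constant-operand SQRDIFF in one masked step.
    static void vsqrdiffc_avx512f_tail_sketch(size_t n /* bytes, 0 < n < 64 */,
                                              const float* a, __m512 vb, float* y) {
      const __mmask16 vmask = (__mmask16) ((UINT32_C(1) << (n / sizeof(float))) - 1);
      const __m512 va = _mm512_maskz_loadu_ps(vmask, a);  // masked-off lanes read as zero
      __m512 vy = _mm512_sub_ps(va, vb);
      vy = _mm512_mul_ps(vy, vy);
      _mm512_mask_storeu_ps(y, vmask, vy);                // only live lanes are written
    }
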
diff --git a/src/f32-vbinary/vopc-neon.c.in b/src/f32-vbinary/vopc-neon.c.in
index de94db7..e9b6893 100644
--- a/src/f32-vbinary/vopc-neon.c.in
+++ b/src/f32-vbinary/vopc-neon.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "vmulq_f32(%s, vb)" % x,
 $  "SUB": lambda x: "vsubq_f32(%s, vb)" % x,
 $  "RSUB": lambda x: "vsubq_f32(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "vsubq_f32(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "vsubq_f32(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -50,6 +52,10 @@
     $for N in range(0, BATCH_TILE, 4):
       float32x4_t vy${ABC[N:N+4]} = ${VOPQ_F32("va" + ABC[N:N+4])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = vmulq_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = vmaxq_f32(vy${ABC[N:N+4]}, vy_min);
@@ -65,6 +71,8 @@
       const float32x4_t va0123 = vld1q_f32(a); a += 4;
 
       float32x4_t vy0123 = ${VOPQ_F32("va0123")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy0123 = vmulq_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = vmaxq_f32(vy0123, vy_min);
         vy0123 = vminq_f32(vy0123, vy_max);
@@ -74,6 +82,8 @@
     const float32x4_t va0123 = vld1q_f32(a);
 
     float32x4_t vy0123 = ${VOPQ_F32("va0123")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy0123 = vmulq_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = vmaxq_f32(vy0123, vy_min);
       vy0123 = vminq_f32(vy0123, vy_max);
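
Unlike the two-array vop kernels, the vopc kernels read the constant operand once; on NEON that is a single broadcast load hoisted out of the loop. A sketch of the LINEAR constant-operand SQRDIFF main loop (illustrative):

    #include <stddef.h>
    #include <arm_neon.h>

    static void vsqrdiffc_neon_x4_sketch(size_t n, const float* a, const float* b, float* y) {
      const float32x4_t vb = vld1q_dup_f32(b);  // broadcast the constant operand once
      for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
        const float32x4_t va = vld1q_f32(a);  a += 4;
        float32x4_t vy = vsubq_f32(va, vb);
        vy = vmulq_f32(vy, vy);
        vst1q_f32(y, vy);  y += 4;
      }
    }
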
diff --git a/src/f32-vbinary/vopc-psimd.c.in b/src/f32-vbinary/vopc-psimd.c.in
index 45877e4..e60a492 100644
--- a/src/f32-vbinary/vopc-psimd.c.in
+++ b/src/f32-vbinary/vopc-psimd.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "psimd_mul_f32(%s, vb)" % x,
 $  "SUB": lambda x: "psimd_sub_f32(%s, vb)" % x,
 $  "RSUB": lambda x: "psimd_sub_f32(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "psimd_sub_f32(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "psimd_sub_f32(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -52,6 +54,10 @@
     $for N in range(0, BATCH_TILE, 4):
       psimd_f32 vy${ABC[N:N+4]} = ${PSIMD_OP_F32("va" + ABC[N:N+4])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = psimd_mul_f32(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = psimd_max_f32(vy${ABC[N:N+4]}, vy_min);
@@ -70,6 +76,8 @@
       a += 4;
 
       psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy0123 = psimd_mul_f32(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = psimd_max_f32(vy0123, vy_min);
         vy0123 = psimd_min_f32(vy0123, vy_max);
@@ -80,6 +88,8 @@
     const psimd_f32 va0123 = psimd_load_f32(a);
 
     psimd_f32 vy0123 = ${PSIMD_OP_F32("va0123")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy0123 = psimd_mul_f32(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = psimd_max_f32(vy0123, vy_min);
       vy0123 = psimd_min_f32(vy0123, vy_max);
diff --git a/src/f32-vbinary/vopc-scalar.c.in b/src/f32-vbinary/vopc-scalar.c.in
index cf65499..c39347f 100644
--- a/src/f32-vbinary/vopc-scalar.c.in
+++ b/src/f32-vbinary/vopc-scalar.c.in
@@ -5,7 +5,7 @@
 
 $assert BATCH_TILE >= 1
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -25,6 +25,8 @@
 $  "MUL": lambda x: "%s * vb" % x,
 $  "SUB": lambda x: "%s - vb" % x,
 $  "RSUB": lambda x: "vb - %s" % x,
+$  "SQRDIFF": lambda x: "%s - vb" % x,
+$  "RSQRDIFF": lambda x: "vb - %s" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -52,6 +54,10 @@
       $for N in range(BATCH_TILE):
         float vy${ABC[N]} = ${OP_FUNC("va" + ABC[N])};
 
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        $for N in range(BATCH_TILE):
+          vy${ABC[N]} = vy${ABC[N]} * vy${ABC[N]};
+
       $if ACTIVATION == "MINMAX":
         $for N in range(BATCH_TILE):
           vy${ABC[N]} = ${MAX_F32}(vy${ABC[N]}, vy_min);
@@ -68,6 +74,8 @@
         do {
           const float va = *a++;
           float vy = ${OP_FUNC("va")};
+          $if OP in ["SQRDIFF", "RSQRDIFF"]:
+            vy = vy * vy;
           $if ACTIVATION == "MINMAX":
             vy = ${MAX_F32}(vy, vy_min);
             vy = ${MIN_F32}(vy, vy_max);
@@ -77,6 +85,8 @@
       $else:
         const float va = *a;
         float vy = ${OP_FUNC("va")};
+        $if OP in ["SQRDIFF", "RSQRDIFF"]:
+          vy = vy * vy;
         $if ACTIVATION == "MINMAX":
           vy = ${MAX_F32}(vy, vy_min);
           vy = ${MIN_F32}(vy, vy_max);
@@ -86,6 +96,8 @@
     for (; n >= sizeof(float); n -= sizeof(float)) {
       const float va = *a++;
       float vy = ${OP_FUNC("va")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy = vy * vy;
       $if ACTIVATION == "MINMAX":
         vy = ${MAX_F32}(vy, vy_min);
         vy = ${MIN_F32}(vy, vy_max);
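
In the scalar vopc kernels the constant operand is dereferenced once and held in a local for the whole batch, so each element costs exactly one subtract and one multiply. Expected shape of the per-element loop (sketch):

    #include <stddef.h>

    static void vsqrdiffc_scalar_sketch(size_t n, const float* a, const float* b, float* y) {
      const float vb = *b;  // read the constant operand once; stays in a register
      for (; n >= sizeof(float); n -= sizeof(float)) {
        const float va = *a++;
        float vy = va - vb;
        vy = vy * vy;
        *y++ = vy;
      }
    }
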
diff --git a/src/f32-vbinary/vopc-sse.c.in b/src/f32-vbinary/vopc-sse.c.in
index 99d93c7..c4ba0bf 100644
--- a/src/f32-vbinary/vopc-sse.c.in
+++ b/src/f32-vbinary/vopc-sse.c.in
@@ -6,7 +6,7 @@
 $assert BATCH_TILE % 4 == 0
 $assert BATCH_TILE >= 4
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
+$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF", "RSQRDIFF"]
 $assert ACTIVATION in ["LINEAR", "MINMAX"]
 #include <assert.h>
 
@@ -26,6 +26,8 @@
 $  "MUL": lambda x: "_mm_mul_ps(%s, vb)" % x,
 $  "SUB": lambda x: "_mm_sub_ps(%s, vb)" % x,
 $  "RSUB": lambda x: "_mm_sub_ps(vb, %s)" % x,
+$  "SQRDIFF": lambda x: "_mm_sub_ps(%s, vb)" % x,
+$  "RSQRDIFF": lambda x: "_mm_sub_ps(vb, %s)" % x,
 $}[OP]
 $SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
@@ -53,6 +55,10 @@
     $for N in range(0, BATCH_TILE, 4):
       __m128 vy${ABC[N:N+4]} = ${_MM_OP_PS("va" + ABC[N:N+4])};
 
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      $for N in range(0, BATCH_TILE, 4):
+        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vy${ABC[N:N+4]});
+
     $if ACTIVATION == "MINMAX":
       $for N in range(0, BATCH_TILE, 4):
         vy${ABC[N:N+4]} = _mm_max_ps(vy${ABC[N:N+4]}, vy_min);
@@ -71,6 +77,8 @@
       a += 4;
 
       __m128 vy0123 = ${_MM_OP_PS("va0123")};
+      $if OP in ["SQRDIFF", "RSQRDIFF"]:
+        vy0123 = _mm_mul_ps(vy0123, vy0123);
       $if ACTIVATION == "MINMAX":
         vy0123 = _mm_max_ps(vy0123, vy_min);
         vy0123 = _mm_min_ps(vy0123, vy_max);
@@ -81,6 +89,8 @@
     const __m128 va0123 = _mm_loadu_ps(a);
 
     __m128 vy0123 = ${_MM_OP_PS("va0123")};
+    $if OP in ["SQRDIFF", "RSQRDIFF"]:
+      vy0123 = _mm_mul_ps(vy0123, vy0123);
     $if ACTIVATION == "MINMAX":
       vy0123 = _mm_max_ps(vy0123, vy_min);
       vy0123 = _mm_min_ps(vy0123, vy_max);
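
Because every backend computes SQRDIFF as one subtract followed by one multiply, and both are single correctly rounded IEEE-754 operations on these targets, the kernels should agree with the scalar reference bit-for-bit, which makes an exact-equality check sufficient when validating them. An illustrative cross-check (not part of this change; assumes FLT_EVAL_METHOD == 0 so the reference also rounds to float at each step, and may differ on backends that flush subnormals, e.g. ARMv7 NEON):

    #include <assert.h>
    #include <stddef.h>

    static void check_sqrdiff_exact(size_t n, const float* a, const float* b,
                                    const float* y /* kernel output */) {
      for (size_t i = 0; i < n; i++) {
        const float d = a[i] - b[i];
        assert(y[i] == d * d);  // exact match expected for non-NaN results
      }
    }
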