AVX & AVX512F versions of binary elementwise micro-kernels

PiperOrigin-RevId: 284867789
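
For reference, a minimal sketch of how a generated kernel might be invoked
(hypothetical driver, not part of this change; assumes the template was
instantiated with OP=ADD and BATCH_TILE=16, yielding
xnn_f32_vadd_ukernel__avx512f_x16, and that the sse variant of
xnn_f32_output_params holds each clamp bound replicated across a
16-byte-aligned float[4], as the _mm_load_ps loads in the template imply):

    #include <stddef.h>
    #include <xnnpack/vbinary.h>

    // Hypothetical caller: adds two float arrays and clamps the result.
    void add_clamped(size_t elements, const float* a, const float* b, float* y) {
      union xnn_f32_output_params params;
      for (size_t i = 0; i < 4; i++) {
        params.sse.min[i] = 0.0f;  // lower clamp bound
        params.sse.max[i] = 6.0f;  // upper clamp bound
      }
      // The kernel's first argument is a byte count, so scale the element count.
      xnn_f32_vadd_ukernel__avx512f_x16(elements * sizeof(float), a, b, y, &params);
    }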
diff --git a/src/f32-vbinary/vop-avx512f.c.in b/src/f32-vbinary/vop-avx512f.c.in
new file mode 100644
index 0000000..84594a3
--- /dev/null
+++ b/src/f32-vbinary/vop-avx512f.c.in
+@@ -0,0 +1,99 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 16 == 0
+$assert BATCH_TILE >= 16
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vbinary.h>
+
+
+$_MM512_OP_PS = {
+$  "ADD": lambda x, y: "_mm512_add_ps(%s, %s)" % (x, y),
+$  "DIV": lambda x, y: "_mm512_div_ps(%s, %s)" % (x, y),
+$  "MAX": lambda x, y: "_mm512_max_ps(%s, %s)" % (x, y),
+$  "MIN": lambda x, y: "_mm512_min_ps(%s, %s)" % (x, y),
+$  "MUL": lambda x, y: "_mm512_mul_ps(%s, %s)" % (x, y),
+$  "SUB": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
+$}[OP]
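+// Compute y[i] := ${OP}(a[i], b[i]) for 16 floats per AVX512 vector, then clamp to [y_min, y_max].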
+void xnn_f32_v${OP.lower()}_ukernel__avx512f_x${BATCH_TILE}(
+    size_t n,
+    const float* a,
+    const float* b,
+    float* y,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
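+  // Broadcast the output clamping bounds from params to all 16 lanes.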
+  const __m512 vy_min = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
+  const __m512 vy_max = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
+
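+  // Main loop: process ${BATCH_TILE} elements per iteration.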
+  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+    const __m512 va${ABC[0:16]} = _mm512_loadu_ps(a);
+    $for N in range(16, BATCH_TILE, 16):
+      const __m512 va${ABC[N:N+16]} = _mm512_loadu_ps(a + ${N});
+    a += ${BATCH_TILE};
+
+    const __m512 vb${ABC[0:16]} = _mm512_loadu_ps(b);
+    $for N in range(16, BATCH_TILE, 16):
+      const __m512 vb${ABC[N:N+16]} = _mm512_loadu_ps(b + ${N});
+    b += ${BATCH_TILE};
+
+    $for N in range(0, BATCH_TILE, 16):
+      __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16], "vb" + ABC[N:N+16])};
+
+    $for N in range(0, BATCH_TILE, 16):
+      vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);
+
+    $for N in range(0, BATCH_TILE, 16):
+      vy${ABC[N:N+16]} = _mm512_min_ps(vy${ABC[N:N+16]}, vy_max);
+
+    _mm512_storeu_ps(y, vy${ABC[0:16]});
+    $for N in range(16, BATCH_TILE, 16):
+      _mm512_storeu_ps(y + ${N}, vy${ABC[N:N+16]});
+    y += ${BATCH_TILE};
+  }
+  $if BATCH_TILE > 16:
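+    // Remainder loop: process one full 16-element vector at a time.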
+    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+      const __m512 va = _mm512_loadu_ps(a);
+      a += 16;
+
+      const __m512 vb = _mm512_loadu_ps(b);
+      b += 16;
+
+      __m512 vy = ${_MM512_OP_PS("va", "vb")};
+      vy = _mm512_max_ps(vy, vy_min);
+      vy = _mm512_min_ps(vy, vy_max);
+      _mm512_storeu_ps(y, vy);
+      y += 16;
+    }
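+  // Tail: handle the last 1-15 elements with masked loads and stores.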
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
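+    // For example, n == 3 remaining elements gives vmask == 0b0000000000000111.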
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
+    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);
+
+    __m512 vy = ${_MM512_OP_PS("va", "vb")};
+    vy = _mm512_max_ps(vy, vy_min);
+    vy = _mm512_min_ps(vy, vy_max);
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}