LRELU (Leaky ReLU) micro-kernels
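
These micro-kernels evaluate the Leaky ReLU function element-wise: each output equals the
input for non-negative elements and the input scaled by a small slope for negative elements.
A minimal scalar sketch of the computation (a hypothetical reference only; the actual kernels
take the batch size in bytes and read the slope from union xnn_f32_lrelu_params):

  void lrelu_reference(size_t n, const float* x, float* y, float slope) {
    for (size_t i = 0; i < n; i++) {
      y[i] = x[i] < 0.0f ? x[i] * slope : x[i];
    }
  }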

PiperOrigin-RevId: 315791296
diff --git a/src/f32-vlrelu/avx.c.in b/src/f32-vlrelu/avx.c.in
new file mode 100644
index 0000000..9e3a244
--- /dev/null
+++ b/src/f32-vlrelu/avx.c.in
@@ -0,0 +1,88 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
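+// Mask table: 8 consecutive entries starting at index (7 - k) form a vector whose first k
+// 32-bit lanes are all-ones and whose remaining lanes are zero (used for the partial load below).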
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_vlrelu_ukernel__avx_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
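+  // Load the slope, replicated across a 128-bit vector in the parameters, and broadcast it to all 8 lanes.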
+  const __m256 vslope = _mm256_broadcast_ps((const __m128*) params->sse.slope);
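+  // Main loop: process ${BATCH_TILE} elements per iteration.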
+  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+    const __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
+    $for N in range(8, BATCH_TILE, 8):
+      const __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
+    x += ${BATCH_TILE};
+
+    $for N in range(0, BATCH_TILE, 8):
+      __m256 vacc${ABC[N:N+8]} = _mm256_mul_ps(vx${ABC[N:N+8]}, vslope);
+
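+    // Keep x * slope where the sign bit of x is set (x < 0); otherwise keep x unchanged.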
+    $for N in range(0, BATCH_TILE, 8):
+      vacc${ABC[N:N+8]} = _mm256_blendv_ps(vx${ABC[N:N+8]}, vacc${ABC[N:N+8]}, vx${ABC[N:N+8]});
+
+    _mm256_storeu_ps(y, vacc${ABC[0:8]});
+    $for N in range(8, BATCH_TILE, 8):
+      _mm256_storeu_ps(y + ${N}, vacc${ABC[N:N+8]});
+    y += ${BATCH_TILE};
+  }
+  $if BATCH_TILE > 8:
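+    // Process leftover groups of 8 elements from the ${BATCH_TILE}-wide main loop.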
+    for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+      const __m256 vx = _mm256_loadu_ps(x);
+      x += 8;
+      __m256 vacc = _mm256_mul_ps(vx, vslope);
+      vacc = _mm256_blendv_ps(vx, vacc, vx);
+      _mm256_storeu_ps(y, vacc);
+      y += 8;
+    }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
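+    // Build a mask whose first n / sizeof(float) lanes are set, so only the valid elements are read.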
+    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    const __m256 vx = _mm256_maskload_ps(x, vmask);
+    __m256 vacc = _mm256_mul_ps(vx, vslope);
+    vacc = _mm256_blendv_ps(vx, vacc, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vacc) could be used here, but triggers msan failures (probably an msan bug).
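+    // Instead, store the remaining 1-7 elements 4, 2, and 1 at a time, shifting higher lanes into vacc_lo.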
+    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vacc_lo);
+      vacc_lo = _mm256_extractf128_ps(vacc, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vacc_lo);
+      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vacc_lo);
+    }
+  }
+}