WAsm SIMD versions of Leaky ReLU microkernels

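The "minmax" variant computes leaky ReLU as max(x, 0) + slope * min(x, 0), with the min/max done as signed-integer operations on the float bit patterns.
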
PiperOrigin-RevId: 321461564
diff --git a/src/f32-vlrelu/wasmsimd-minmax.c.in b/src/f32-vlrelu/wasmsimd-minmax.c.in
new file mode 100644
index 0000000..5a7832f
--- /dev/null
+++ b/src/f32-vlrelu/wasmsimd-minmax.c.in
+@@ -0,0 +1,79 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_vlrelu_ukernel__wasmsimd_minmax_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
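+  // Signed-integer min/max on the raw IEEE-754 bits split x by sign:
+  // non-negative floats order like non-negative integers, and any float with
+  // the sign bit set (including -0.0f) compares below integer zero.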
+  const v128_t vslope = wasm_v32x4_load_splat(&params->scalar.slope);
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
+      $for N in range(4, BATCH_TILE, 4):
+        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
+      x += ${BATCH_TILE};
+
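+      // vacc = positive part of x; vx = negative part of x.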
+      $for N in range(0, BATCH_TILE, 4):
+        v128_t vacc${ABC[N:N+4]} = wasm_i32x4_max(vx${ABC[N:N+4]}, vzero);
+        vx${ABC[N:N+4]} = wasm_i32x4_min(vx${ABC[N:N+4]}, vzero);
+
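+      // Add the scaled negative part back in: vacc += slope * vx.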
+      $for N in range(0, BATCH_TILE, 4):
+        vacc${ABC[N:N+4]} = wasm_f32x4_add(vacc${ABC[N:N+4]}, wasm_f32x4_mul(vx${ABC[N:N+4]}, vslope));
+
+      wasm_v128_store(y, vacc${ABC[0:4]});
+      $for N in range(4, BATCH_TILE, 4):
+        wasm_v128_store(y + ${N}, vacc${ABC[N:N+4]});
+      y += ${BATCH_TILE};
+    }
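+  // Process the remaining full vectors of 4 floats.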
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+    v128_t vacc = wasm_i32x4_max(vx, vzero);
+    vx = wasm_i32x4_min(vx, vzero);
+    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));
+    wasm_v128_store(y, vacc);
+    y += 4;
+  }
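+  // Handle the final 1-3 floats: compute a full vector, store only the valid lanes.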
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+    v128_t vacc = wasm_i32x4_max(vx, vzero);
+    vx = wasm_i32x4_min(vx, vzero);
+    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));
+
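+    // Store two lanes at once, then shift lanes 2-3 down in case one more element remains.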
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vacc, 0);
+      vacc = wasm_v32x4_shuffle(vacc, vacc, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vacc, 0);
+    }
+  }
+}