Initial open-source release

PiperOrigin-RevId: 271685289
diff --git a/src/f32-igemm/MRx2c4-sse.c.in b/src/f32-igemm/MRx2c4-sse.c.in
new file mode 100644
index 0000000..9851646
--- /dev/null
+++ b/src/f32-igemm/MRx2c4-sse.c.in
@@ -0,0 +1,166 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert NR == 2
+$assert MR % 2 == 0
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
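+// Micro-kernel for the indirect GEMM (IGEMM) used by f32 convolution:
+// computes an ${MR}x${NR} block of output with NR fixed at 2, accumulating
+// 4 input channels at a time per SSE register (the "c4" layout). MR must
+// be even because output rows are reduced and stored in pairs.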
+void xnn_f32_igemm_ukernel_${MR}x${NR}c4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (${MR} * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+
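+  // Set up one output pointer per row of the block. When fewer than ${MR}
+  // rows remain (mr < ${MR}), out-of-range pointers alias the previous row
+  // so that every store below stays in bounds.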
+  float* c0 = c;
+  $for M in range(1, MR):
+    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        c${M} = c${M-1};
+      }
+
+  do {
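+    // Initialize accumulators: the bias of each output channel is loaded
+    // into lane 0 of the corresponding row-0 accumulator (remaining lanes
+    // start at zero), then replicated to the other rows.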
+    __m128 vacc0x0c4 = _mm_load_ss(w);
+    $for N in range(1, NR):
+      __m128 vacc0x${N}c4 = _mm_load_ss(w + ${N});
+    $for M in range(1, MR):
+      $for N in range(NR):
+        __m128 vacc${M}x${N}c4 = vacc0x${N}c4;
+    w += ${NR};
+
+    size_t p = ks;
+    do {
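+      // Load the ${MR} input row pointers for this indirection position.
+      // A pointer equal to the explicit zero buffer is used as-is (it marks
+      // padding); all others are adjusted by a_offset.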
+      $for M in range(MR):
+        const float* restrict a${M} = a[${M}];
+        if XNN_UNPREDICTABLE(a${M} != zero) {
+          a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
+        }
+      a += ${MR};
+
+      size_t k = kc;
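+      // Main loop: consume 4 input channels per iteration, accumulating
+      // 4 partial products per (row, output channel) pair.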
+      for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
+        $for M in range(MR):
+          const __m128 va${M} = _mm_loadu_ps(a${M});
+          a${M} += 4;
+
+        const __m128 vb0 = _mm_loadu_ps(w);
+        $for N in range(1, NR):
+          const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
+        w += ${NR * 4};
+
+        $for M in range(MR):
+          $for N in range(NR):
+            vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(va${M}, vb${N}));
+      }
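+      // Remainder: 1-3 input channels left. A full vector of 4 is still
+      // loaded from A, so lanes whose packed weight is zero (the packing is
+      // expected to zero-pad past kc) are masked off and contribute nothing.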
+      if XNN_UNLIKELY(k != 0) {
+        $for M in range(MR):
+          const __m128 va${M} = _mm_loadu_ps(a${M});
+
+        const __m128 vb0 = _mm_loadu_ps(w);
+        $for N in range(1, NR):
+          const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
+        w += ${NR * 4};
+
+        $for N in range(NR):
+          const __m128 vmask${N} = _mm_cmpeq_ps(_mm_setzero_ps(), vb${N});
+
+        $for M in range(MR):
+          $for N in range(NR):
+            vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(_mm_andnot_ps(vmask${N}, va${M}), vb${N}));
+      }
+      p -= ${MR} * sizeof(void*);
+    } while (p != 0);
+
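+    // Reduce the c4 accumulators: unpacklo/unpackhi + add folds each row's
+    // two 4-wide partial sums into per-channel pairs, and movelh/movehl
+    // then combines adjacent rows, leaving both outputs of rows M and M+1
+    // in a single register.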
+    $for M in range(MR):
+      const __m128 vacc${M}x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc${M}x0c4, vacc${M}x1c4), _mm_unpackhi_ps(vacc${M}x0c4, vacc${M}x1c4));
+
+    $for M in range(0, MR, 2):
+      __m128 vacc${M}${M+1}x01 = _mm_add_ps(_mm_movelh_ps(vacc${M}x01c2, vacc${M+1}x01c2), _mm_movehl_ps(vacc${M+1}x01c2, vacc${M}x01c2));
+
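+    // Clamp the outputs to the [min, max] range specified in params.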
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    $for M in range(0, MR, 2):
+      vacc${M}${M+1}x01 = _mm_min_ps(vacc${M}${M+1}x01, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    $for M in range(0, MR, 2):
+      vacc${M}${M+1}x01 = _mm_max_ps(vacc${M}${M+1}x01, vmin);
+
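+    // Full-width store: write ${NR} outputs per row (high pair to the odd
+    // row, low pair to the even row), advance each pointer by cn_stride,
+    // and rewind the indirection buffer by ks for the next column block.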
+    if XNN_LIKELY(nc >= ${NR}) {
+      $for M in reversed(range(0, MR, 2)):
+        _mm_storeh_pi((__m64*) c${M+1}, vacc${M}${M+1}x01);
+        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
+        _mm_storel_pi((__m64*) c${M}, vacc${M}${M+1}x01);
+        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= ${NR};
+    } else {
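+      // nc == 1 remainder: store a single output per row; the odd row's
+      // value is moved down from the high half of the register.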
+      assert(nc == 1);
+      $for M in reversed(range(0, MR, 2)):
+        _mm_store_ss(c${M+1}, _mm_movehl_ps(vacc${M}${M+1}x01, vacc${M}${M+1}x01));
+        _mm_store_ss(c${M}, vacc${M}${M+1}x01);
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}