AVX and FMA3 microkernels for GEMM/GEMMINC/IGEMM
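
Each generated kernel computes one clamped MR x NR output tile, starting
either from the packed bias (GEMM) or from a caller-provided partial
accumulator (GEMMINC). A minimal scalar sketch of that contract follows
(hypothetical reference code, not part of the library; strides are in
elements here for clarity, while the kernels take byte strides):

    #include <stddef.h>

    // Reference for one MR x NR tile: c = clamp(a * b + bias, vmin, vmax).
    // Weights follow the packed layout the kernels read: NR bias values,
    // then NR weights per k step.
    static void gemm_tile_ref(
        size_t mr, size_t nr, size_t k,
        const float* a, size_t a_stride,  // a: mr rows of k elements
        const float* w,                   // w: nr + k*nr packed floats
        float* c, size_t cm_stride,
        float vmin, float vmax)
    {
      for (size_t m = 0; m < mr; m++) {
        for (size_t n = 0; n < nr; n++) {
          float acc = w[n];  // bias, stored ahead of the weights
          for (size_t kk = 0; kk < k; kk++) {
            acc += a[m * a_stride + kk] * w[nr + kk * nr + n];
          }
          acc = acc > vmax ? vmax : acc;  // clamp, upper bound first
          acc = acc < vmin ? vmin : acc;
          c[m * cm_stride + n] = acc;
        }
      }
    }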

PiperOrigin-RevId: 281807374
diff --git a/src/f32-gemm/avx-broadcast.c.in b/src/f32-gemm/avx-broadcast.c.in
new file mode 100644
index 0000000..90d7e08
--- /dev/null
+++ b/src/f32-gemm/avx-broadcast.c.in
@@ -0,0 +1,179 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
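+// Template parameters: MR x NR is the output tile size; FMA selects FMA3 (3)
+// or plain AVX multiply+add (0); INC generates the GEMMINC variant, which
+// resumes from a caller-provided accumulator instead of the bias.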
+$assert NR % 8 == 0
+$ABC = "0123456789ABCDEFGHIJKLMN"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+$ISA = {0: "avx", 3: "fma3"}[FMA]
+void xnn_f32_gemm${"inc" if INC else ""}_ukernel_${MR}x${NR}__${ISA}_broadcast(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    $if INC:
+      const float*restrict acc,
+    const union xnn_f32_output_params params[restrict static 1])
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  $if INC:
+    assert(acc != NULL);
+
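+  // Set up per-row pointers into A and C. Rows past mr alias the previous
+  // row, so excess rows redundantly recompute and overwrite its results.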
+  const float* a0 = a;
+  float* c0 = c;
+  $for M in range(1, MR):
+    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
+    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+
+  do {
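+    // Initialize accumulators: GEMMINC resumes from a caller-provided partial
+    // accumulator; otherwise start from the bias, which is packed ahead of
+    // the weights, replicated to all MR rows.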
+    $if INC:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          __m256 vacc${M}x${ABC[N:N+8]} = _mm256_load_ps(acc + ${M*NR+N});
+      acc += ${MR*NR};
+    $else:
+      $for N in range(0, NR, 8):
+        __m256 vacc0x${ABC[N:N+8]} = _mm256_load_ps(w + ${N});
+      $for M in range(1, MR):
+        $for N in range(0, NR, 8):
+          __m256 vacc${M}x${ABC[N:N+8]} = vacc0x${ABC[N:N+8]};
+      w += ${NR};
+
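+    // Main loop: each iteration broadcasts one A element per row and
+    // multiplies it with NR packed weights.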
+    size_t k = kc;
+    do {
+      $for M in range(MR):
+        const __m256 va${M} = _mm256_broadcast_ss(a${M});
+        a${M} += 1;
+
+      const __m256 vb${ABC[0:8]} = _mm256_load_ps(w);
+      $for N in range(8, NR, 8):
+        const __m256 vb${ABC[N:N+8]} = _mm256_load_ps(w + ${N});
+      w += ${NR};
+
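+      // vacc[m][n:n+8] += va[m] * vb[n:n+8]: fused on FMA3, mul+add on AVX.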
+      $for N in range(0, NR, 8):
+        $for M in range(MR):
+          $if FMA == 3:
+            vacc${M}x${ABC[N:N+8]} = _mm256_fmadd_ps(va${M}, vb${ABC[N:N+8]}, vacc${M}x${ABC[N:N+8]});
+          $else:
+            vacc${M}x${ABC[N:N+8]} = _mm256_add_ps(vacc${M}x${ABC[N:N+8]}, _mm256_mul_ps(va${M}, vb${ABC[N:N+8]}));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
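+    // Clamp outputs to [min, max] from the SSE-layout params, upper bound first.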
+    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+    $for N in range(0, NR, 8):
+      $for M in range(MR):
+        vacc${M}x${ABC[N:N+8]} = _mm256_min_ps(vacc${M}x${ABC[N:N+8]}, vmax);
+
+    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+    $for N in range(0, NR, 8):
+      $for M in range(MR):
+        vacc${M}x${ABC[N:N+8]} = _mm256_max_ps(vacc${M}x${ABC[N:N+8]}, vmin);
+
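+    // Full tile: store all NR columns per row, advance C by cn_stride, and
+    // rewind the A pointers by kc for the next tile of columns.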
+    if XNN_LIKELY(nc >= ${NR}) {
+      $for M in reversed(range(MR)):
+        _mm256_storeu_ps(c${M}, vacc${M}x${ABC[0:8]});
+        $for N in range(8, NR, 8):
+          _mm256_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+8]});
+        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
+
+      $for M in reversed(range(MR)):
+        a${M} = (const float*) ((uintptr_t) a${M} - kc);
+
+      nc -= ${NR};
+    } else {
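+      // Partial tile: peel the nc remainder into power-of-two chunks
+      // (8/4/2/1), shifting surviving lanes down after each partial store.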
+      $for LOG2N in reversed(range(NR.bit_length())):
+        $if NR != 1 << LOG2N:
+          if (nc & ${1 << LOG2N}) {
+            $if LOG2N >= 3:
+              $for M in reversed(range(MR)):
+                _mm256_storeu_ps(c${M}, vacc${M}x${ABC[0:8]});
+                $for N in range(8, 1 << LOG2N, 8):
+                  _mm256_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+8]});
+
+              $for M in reversed(range(MR)):
+                $for N in range(0, 1 << (LOG2N - 1), 8):
+                  vacc${M}x${ABC[N:N+8]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+8]};
+
+              $for M in reversed(range(MR)):
+                c${M} += ${1 << LOG2N};
+            $elif LOG2N == 2:
+              $for M in reversed(range(MR)):
+                _mm_storeu_ps(c${M}, vacc${M}x${ABC[0:4]});
+
+              $for M in reversed(range(MR)):
+                vacc${M}x${ABC[0:4]} = _mm256_extractf128_ps(vacc${M}x${ABC[0:8]}, 1);
+
+              $for M in reversed(range(MR)):
+                c${M} += 4;
+            $elif LOG2N == 1:
+              $for M in reversed(range(MR)):
+                _mm_storel_pi((__m64*) c${M}, vacc${M}x${ABC[0:4]});
+
+              $for M in reversed(range(MR)):
+                vacc${M}x${ABC[0:4]} = _mm_movehl_ps(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]});
+
+              $for M in reversed(range(MR)):
+                c${M} += 2;
+            $elif LOG2N == 0:
+              $for M in reversed(range(MR)):
+                _mm_store_ss(c${M}, vacc${M}x${ABC[0:4]});
+          }
+        $if LOG2N == 3:
+          $for M in reversed(range(MR)):
+            __m128 vacc${M}x${ABC[0:4]} = _mm256_castps256_ps128(vacc${M}x${ABC[0:8]});
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}