// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

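// ${MR}x${NR} GEMM microkernel with fused min/max clamping for SSE; the "c4"
// suffix means the K dimension is reduced in groups of 4 channels per
// accumulator lane.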
void xnn_f32_gemm_minmax_ukernel_${MR}x${NR}c4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
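
  // Set up the row pointers into A and C. Rows past mr alias the previous
  // row, so their loads and stores stay in bounds and just repeat valid work.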
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
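
  // Outer loop: each iteration produces one ${NR}-column tile of the output.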
  do {
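    // Seed the accumulators with the ${NR} column biases that packing places
    // at the start of each weight block. Only lane 0 is loaded; the other
    // lanes start at zero and are folded in by the final reduction.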
    __m128 vacc0x0c4 = _mm_load_ss(w);
    $for N in range(1, NR):
      __m128 vacc0x${N}c4 = _mm_load_ss(w + ${N});
    $for M in range(1, MR):
      $for N in range(NR):
        __m128 vacc${M}x${N}c4 = vacc0x${N}c4;
    w += ${NR};

    size_t k = kc;
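    // Main loop: consume 4 K channels per iteration, keeping four partial
    // products per (row, column) pair in the four SIMD lanes.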
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      $for M in range(MR):
        const __m128 va${M} = _mm_loadu_ps(a${M});
        a${M} += 4;

      const __m128 vb0 = _mm_loadu_ps(w);
      $for N in range(1, NR):
        const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
      w += ${NR * 4};

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(va${M}, vb${N}));
    }
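    // Remainder: kc is not a multiple of 4, so the A loads may read past the
    // end of the row (permitted under XNN_OOB_READS). The packed weights are
    // zero-padded, and zeroing A wherever B is zero keeps out-of-bounds junk
    // (including NaN/Inf, for which 0 * x != 0) out of the accumulators.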
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const __m128 va${M} = _mm_loadu_ps(a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      const __m128 vb0 = _mm_loadu_ps(w);
      $for N in range(1, NR):
        const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
      w += ${NR * 4};

      $for N in range(NR):
        const __m128 vmask${N} = _mm_cmpeq_ps(_mm_setzero_ps(), vb${N});

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(_mm_andnot_ps(vmask${N}, va${M}), vb${N}));
    }
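
    // Horizontal reduction: fold each row's two 4-lane accumulators to two
    // partial sums per column, then merge adjacent rows so one register holds
    // { c[M][0], c[M][1], c[M+1][0], c[M+1][1] }.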
    $for M in range(MR):
      const __m128 vacc${M}x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc${M}x0c4, vacc${M}x1c4), _mm_unpackhi_ps(vacc${M}x0c4, vacc${M}x1c4));

    $for M in range(0, MR, 2):
      __m128 vacc${M}${M+1}x01 = _mm_add_ps(_mm_movelh_ps(vacc${M}x01c2, vacc${M+1}x01c2), _mm_movehl_ps(vacc${M+1}x01c2, vacc${M}x01c2));
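
    // Clamp the results to the [min, max] output range from params.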
    const __m128 vmax = _mm_load_ps(params->sse.max);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_min_ps(vacc${M}${M+1}x01, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_max_ps(vacc${M}${M+1}x01, vmin);
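
    // Full ${NR}-column tile: the low half of each combined register is row
    // M's pair of outputs, the high half row M+1's. Rewind the A pointers so
    // the next column tile re-reads the same rows of A.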
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(0, MR, 2)):
        _mm_storel_pi((__m64*) c${M}, vacc${M}${M+1}x01);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
        a${M} = (const float*) ((uintptr_t) a${M} - kc);
        _mm_storeh_pi((__m64*) c${M+1}, vacc${M}${M+1}x01);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        a${M+1} = (const float*) ((uintptr_t) a${M+1} - kc);

      nc -= ${NR};
    } else {
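      // Final single-column tile: store one float per row. Row M+1's value
      // sits in lane 2 of the combined register and is exposed via movehl.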
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        _mm_store_ss(c${M}, vacc${M}${M+1}x01);
        _mm_store_ss(c${M+1}, _mm_movehl_ps(vacc${M}${M+1}x01, vacc${M}${M+1}x01));

      nc = 0;
    }
  } while (nc != 0);
}