// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
$assert ACTIVATION in ["LINEAR", "MINMAX"]
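// Template parameters:
//   BATCH_TILE  - number of floats processed per main-loop iteration (a multiple of 16).
//   OP          - element-wise binary operation (ADD, DIV, MAX, MIN, MUL, SUB, or SQRDIFF).
//   ACTIVATION  - output post-processing: LINEAR (none) or MINMAX (clamp to [min, max]).
// Concrete kernel sources are expanded from this template by XNNPACK's code generator (tools/xngen).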
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
$_MM512_OP_PS = {
$ "ADD": lambda x, y: "_mm512_add_ps(%s, %s)" % (x, y),
$ "DIV": lambda x, y: "_mm512_div_ps(%s, %s)" % (x, y),
$ "MAX": lambda x, y: "_mm512_max_ps(%s, %s)" % (x, y),
$ "MIN": lambda x, y: "_mm512_min_ps(%s, %s)" % (x, y),
$ "MUL": lambda x, y: "_mm512_mul_ps(%s, %s)" % (x, y),
$ "SUB": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
$ "SQRDIFF": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
$}[OP]
$SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
$PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
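// Element-wise binary-operation microkernel: computes y[i] = op(a[i], b[i]) over n bytes of
// float data, 16 floats per AVX512 vector, with optional [min, max] output clamping.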
void xnn_f32_v${OP.lower()}${SUFFIX}_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(a != NULL);
  assert(b != NULL);
  assert(y != NULL);

  $if ACTIVATION == "MINMAX":
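    // Broadcast the 4-element min/max clamping parameters to all 16 lanes of a 512-bit vector.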
    const __m512 vy_min = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    const __m512 vy_max = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));

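  // Main loop: process BATCH_TILE floats per iteration.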
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m512 va${ABC[0:16]} = _mm512_loadu_ps(a);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 va${ABC[N:N+16]} = _mm512_loadu_ps(a + ${N});
    a += ${BATCH_TILE};

    const __m512 vb${ABC[0:16]} = _mm512_loadu_ps(b);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 vb${ABC[N:N+16]} = _mm512_loadu_ps(b + ${N});
    b += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 16):
      __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16], "vb" + ABC[N:N+16])};

    $if OP == "SQRDIFF":
      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vy${ABC[N:N+16]});

    $if ACTIVATION == "MINMAX":
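      // MINMAX activation: clamp the results to the [min, max] output range.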
      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);

      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_min_ps(vy${ABC[N:N+16]}, vy_max);

    _mm512_storeu_ps(y, vy${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      _mm512_storeu_ps(y + ${N}, vy${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 16:
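    // Tail loop: process remaining full groups of 16 floats (generated only when BATCH_TILE > 16).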
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      const __m512 va = _mm512_loadu_ps(a);
      a += 16;

      const __m512 vb = _mm512_loadu_ps(b);
      b += 16;

      __m512 vy = ${_MM512_OP_PS("va", "vb")};
      $if OP == "SQRDIFF":
        vy = _mm512_mul_ps(vy, vy);
      $if ACTIVATION == "MINMAX":
        vy = _mm512_max_ps(vy, vy_min);
        vy = _mm512_min_ps(vy, vy_max);
      _mm512_storeu_ps(y, vy);
      y += 16;
    }
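  // Remainder: handle the final 1-15 floats with masked loads and a masked store.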
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
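    // Masked loads read only the first n elements; masked-off lanes are zeroed.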
    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);

    __m512 vy = ${_MM512_OP_PS("va", "vb")};
    $if OP == "SQRDIFF":
      vy = _mm512_mul_ps(vy, vy);
    $if ACTIVATION == "MINMAX":
      vy = _mm512_max_ps(vy, vy_min);
      vy = _mm512_min_ps(vy, vy_max);
    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}