// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB"]
$assert ACTIVATION in ["LINEAR", "MINMAX"]
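// Template parameters (enforced by the $assert directives above):
//   BATCH_TILE - number of float elements processed per main-loop iteration (multiple of 16).
//   OP         - binary operation: ADD, DIV, RDIV, MAX, MIN, MUL, SUB, or RSUB.
//   ACTIVATION - LINEAR (no clamping) or MINMAX (clamp the output to [min, max]).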
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
$_MM512_OP_PS = {
$ "ADD": lambda x: "_mm512_add_ps(%s, vb)" % x,
$ "DIV": lambda x: "_mm512_div_ps(%s, vb)" % x,
$ "RDIV": lambda x: "_mm512_div_ps(vb, %s)" % x,
$ "MAX": lambda x: "_mm512_max_ps(%s, vb)" % x,
$ "MIN": lambda x: "_mm512_min_ps(%s, vb)" % x,
$ "MUL": lambda x: "_mm512_mul_ps(%s, vb)" % x,
$ "SUB": lambda x: "_mm512_sub_ps(%s, vb)" % x,
$ "RSUB": lambda x: "_mm512_sub_ps(vb, %s)" % x,
$}[OP]
$SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
$PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
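// vOPc microkernel: applies the templated binary operation between each element of the input
// vector `a` and the broadcast scalar operand `*b`, with an optional MIN/MAX output clamp.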
void xnn_f32_v${OP.lower()}c${SUFFIX}_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  $if ACTIVATION == "MINMAX":
    // Load the output clamping bounds, replicated across all 16 lanes.
    const __m512 vy_min = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    const __m512 vy_max = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
  // Broadcast the scalar operand `b` to all lanes; it is reused for every element of `a`.
  const __m512 vb = _mm512_set1_ps(*b);
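  // Main loop: process one full batch tile per iteration.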
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m512 va${ABC[0:16]} = _mm512_loadu_ps(a);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 va${ABC[N:N+16]} = _mm512_loadu_ps(a + ${N});
    a += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 16):
      __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16])};

    $if ACTIVATION == "MINMAX":
      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);

      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_min_ps(vy${ABC[N:N+16]}, vy_max);

    _mm512_storeu_ps(y, vy${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      _mm512_storeu_ps(y + ${N}, vy${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 16:
    // Drain remaining full 16-element vectors (only reachable when the batch tile is wider than one vector).
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      const __m512 va = _mm512_loadu_ps(a);
      a += 16;

      __m512 vy = ${_MM512_OP_PS("va")};
      $if ACTIVATION == "MINMAX":
        vy = _mm512_max_ps(vy, vy_min);
        vy = _mm512_min_ps(vy, vy_max);
      _mm512_storeu_ps(y, vy);
      y += 16;
    }
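  // Remainder: handle the final 1..15 elements with a masked load/store.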
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);

    __m512 vy = ${_MM512_OP_PS("va")};
    $if ACTIVATION == "MINMAX":
      vy = _mm512_max_ps(vy, vy_min);
      vy = _mm512_min_ps(vy, vy_max);
    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}