// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// NOTE(template): _MM512_OP_PS maps the OP template parameter to a lambda that
// emits the matching AVX512F arithmetic intrinsic; it is evaluated entirely at
// code-generation time and produces no code of its own in the generated file.
$_MM512_OP_PS = {
$  "ADD": lambda x, y: "_mm512_add_ps(%s, %s)" % (x, y),
$  "DIV": lambda x, y: "_mm512_div_ps(%s, %s)" % (x, y),
$  "MAX": lambda x, y: "_mm512_max_ps(%s, %s)" % (x, y),
$  "MIN": lambda x, y: "_mm512_min_ps(%s, %s)" % (x, y),
$  "MUL": lambda x, y: "_mm512_mul_ps(%s, %s)" % (x, y),
$  "SUB": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
$}[OP]
// Applies ${OP} element-wise to two f32 vectors using AVX512F, clamping each
// result to the [min, max] range supplied in params, processing ${BATCH_TILE}
// elements per main-loop iteration.
//
//   n      - length of the input in BYTES; must be non-zero and a multiple of
//            sizeof(float).
//   a, b   - input arrays of n / sizeof(float) elements each.
//   y      - output array; y[i] = min(max(a[i] OP b[i], min), max).
//   params - output clamping bounds (params->sse.min / params->sse.max,
//            each an SSE-layout group of 4 floats).
void xnn_f32_v${OP.lower()}_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // Broadcast the 4-float min/max groups from params across all 16 lanes.
  const __m512 vy_min = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
  const __m512 vy_max = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));

  // Main loop: ${BATCH_TILE} elements (${BATCH_TILE} / 16 ZMM registers) per
  // iteration — load, compute, clamp, store.
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m512 va${ABC[0:16]} = _mm512_loadu_ps(a);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 va${ABC[N:N+16]} = _mm512_loadu_ps(a + ${N});
    a += ${BATCH_TILE};

    const __m512 vb${ABC[0:16]} = _mm512_loadu_ps(b);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 vb${ABC[N:N+16]} = _mm512_loadu_ps(b + ${N});
    b += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 16):
      __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16], "vb" + ABC[N:N+16])};

    $for N in range(0, BATCH_TILE, 16):
      vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);

    $for N in range(0, BATCH_TILE, 16):
      vy${ABC[N:N+16]} = _mm512_min_ps(vy${ABC[N:N+16]}, vy_max);

    _mm512_storeu_ps(y, vy${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      _mm512_storeu_ps(y + ${N}, vy${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  // Tail loop over remaining full 16-element groups. NOTE(review): the
  // $assert BATCH_TILE >= 16 at the top of the file makes this $if guard
  // always true; for BATCH_TILE == 16 the generated loop never executes
  // (the main loop has already consumed every full 16-element group).
  $if BATCH_TILE >= 16:
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      const __m512 va = _mm512_loadu_ps(a);
      a += 16;

      const __m512 vb = _mm512_loadu_ps(b);
      b += 16;

      __m512 vy = ${_MM512_OP_PS("va", "vb")};
      vy = _mm512_max_ps(vy, vy_min);
      vy = _mm512_min_ps(vy, vy_max);
      _mm512_storeu_ps(y, vy);
      y += 16;
    }
  // Remainder of 1-15 elements: use an AVX512 lane mask so the masked
  // load/store never touch memory past the end of the arrays.
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    // Masked-off lanes load as zero and are never stored, so their values
    // are irrelevant.
    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);

    __m512 vy = ${_MM512_OP_PS("va", "vb")};
    vy = _mm512_max_ps(vy, vy_min);
    vy = _mm512_min_ps(vy, vy_max);
    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}