// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

6$assert NR % 16 == 0
7$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
8#include <assert.h>
9
10#include <immintrin.h>
11
12#include <xnnpack/igemm.h>
Marat Dukhancfb31342019-12-05 10:42:57 -080013#include <xnnpack/intrinsics-polyfill.h>
Marat Dukhan0f349c42019-11-27 11:58:54 -080014
15
Marat Dukhande06f492020-04-09 00:19:31 -070016void xnn_f32_igemm_minmax_ukernel_${MR}x${NR}__avx512f_broadcast(
Marat Dukhan0f349c42019-11-27 11:58:54 -080017 size_t mr,
18 size_t nc,
19 size_t kc,
20 size_t ks,
21 const float**restrict a,
22 const float*restrict w,
23 float*restrict c,
24 size_t cm_stride,
25 size_t cn_stride,
26 size_t a_offset,
27 const float* zero,
Marat Dukhanf196d012020-04-15 11:50:03 -070028 const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
Marat Dukhan0f349c42019-11-27 11:58:54 -080029{
30 assert(mr != 0);
31 assert(mr <= ${MR});
32 assert(nc != 0);
33 assert(kc != 0);
34 assert(kc % sizeof(float) == 0);
35 assert(ks != 0);
36 assert(ks % (${MR} * sizeof(void*)) == 0);
37 assert(a_offset % sizeof(float) == 0);
38 assert(a != NULL);
39 assert(w != NULL);
40 assert(c != NULL);
41
42 float* c0 = c;
43 $for M in range(1, MR):
44 float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
45 $if M % 2 == 0:
46 if XNN_UNPREDICTABLE(mr <= ${M}) {
47 c${M} = c${M-1};
48 }
49 $elif M + 1 == MR:
50 if XNN_UNPREDICTABLE(mr != ${M+1}) {
51 c${M} = c${M-1};
52 }
53 $else:
54 if XNN_UNPREDICTABLE(mr < ${M+1}) {
55 c${M} = c${M-1};
56 }
57
58 do {
59 __m512 vacc0x${ABC[0:16]} = _mm512_load_ps(w);
60 $for N in range(16, NR, 16):
61 __m512 vacc0x${ABC[N:N+16]} = _mm512_load_ps(w + ${N});
62 $for M in range(1, MR):
63 $for N in range(0, NR, 16):
64 __m512 vacc${M}x${ABC[N:N+16]} = vacc0x${ABC[N:N+16]};
65 w += ${NR};
66
67 size_t p = ks;
68 do {
69 $for M in range(MR):
70 const float* restrict a${M} = a[${M}];
71 assert(a${M} != NULL);
72 if XNN_UNPREDICTABLE(a${M} != zero) {
73 a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
74 }
75 a += ${MR};
76
77 size_t k = kc;
78 do {
79 const __m512 vb${ABC[0:16]} = _mm512_load_ps(w);
80 $for N in range(16, NR, 16):
81 const __m512 vb${ABC[N:N+16]} = _mm512_load_ps(w + ${N});
82 w += ${NR};
83
84 $for M in range(MR):
Marat Dukhan0f281932021-12-30 12:51:26 -080085 const __m512 va${M} = _mm512_set1_ps(*a${M});
Marat Dukhan0f349c42019-11-27 11:58:54 -080086 $for N in range(0, NR, 16):
Marat Dukhan0f281932021-12-30 12:51:26 -080087 vacc${M}x${ABC[N:N+16]} = _mm512_fmadd_ps(va${M}, vb${ABC[N:N+16]}, vacc${M}x${ABC[N:N+16]});
Marat Dukhan0f349c42019-11-27 11:58:54 -080088
89 $for M in range(MR):
90 a${M} += 1;
91
92 k -= sizeof(float);
93 } while (k != 0);
94 p -= ${MR} * sizeof(void*);
95 } while (p != 0);
96
Marat Dukhan104ae5e2021-05-24 13:41:57 -070097 const __m512 vmin = _mm512_set1_ps(params->scalar.min);
Marat Dukhan0f349c42019-11-27 11:58:54 -080098 $for N in range(0, NR, 16):
99 $for M in range(MR):
100 vacc${M}x${ABC[N:N+16]} = _mm512_max_ps(vacc${M}x${ABC[N:N+16]}, vmin);
101
Marat Dukhan104ae5e2021-05-24 13:41:57 -0700102 const __m512 vmax = _mm512_set1_ps(params->scalar.max);
103 $for N in range(0, NR, 16):
104 $for M in range(MR):
105 vacc${M}x${ABC[N:N+16]} = _mm512_min_ps(vacc${M}x${ABC[N:N+16]}, vmax);
106
Marat Dukhan0f349c42019-11-27 11:58:54 -0800107 if XNN_LIKELY(nc >= ${NR}) {
108 $for M in reversed(range(MR)):
109 _mm512_storeu_ps(c${M}, vacc${M}x${ABC[0:16]});
110 $for N in range(16, NR, 16):
111 _mm512_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+16]});
112 c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
113
114 a = (const float**restrict) ((uintptr_t) a - ks);
115 nc -= ${NR};
116 } else {
117 $for LOG2N in reversed(range(4, NR.bit_length())):
118 $if NR != 1 << LOG2N:
119 if (nc & ${1 << LOG2N}) {
120 $if LOG2N >= 4:
121 $for M in reversed(range(MR)):
122 _mm512_storeu_ps(c${M}, vacc${M}x${ABC[0:16]});
123 $for N in range(16, 1 << LOG2N, 16):
124 _mm512_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+16]});
125
126 $for M in reversed(range(MR)):
127 $for N in range(0, 1 << (LOG2N - 1), 16):
128 vacc${M}x${ABC[N:N+16]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+16]};
129
130 $for M in reversed(range(MR)):
131 c${M} += ${1 << LOG2N};
132 }
133 $if LOG2N == 4:
134 if (nc & 15) {
135 // Prepare mask for valid 32-bit elements (depends on nc).
136 const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
137
138 $for M in reversed(range(MR)):
139 _mm512_mask_storeu_ps(c${M}, vmask, vacc${M}x${ABC[0:16]});
140 }
141
142 nc = 0;
143 }
144 } while (nc != 0);
145}