// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

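// 8x16 f32 GEMM microkernel using AVX512F with broadcast loads: each call
// computes up to 8 rows x 16 columns of C = clamp(A * B). The packed weights
// in w hold, for each 16-column block, 16 accumulator-initializer (bias)
// floats followed by one 16-float weight group per element of K, matching the
// sequential loads below.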
void xnn_f32_gemm_ukernel_8x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

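  // Set up per-row pointers into A and C. When mr < 8, each out-of-range row
  // pointer aliases the last valid one, so the extra rows recompute and
  // re-store the same values instead of touching memory past row mr-1.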
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

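  // Outer loop: produce the output 16 columns at a time until nc is exhausted.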
  do {
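    // Initialize all 8 row accumulators from the 16 bias values that lead
    // each 16-column block of the packed weights.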
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc7x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

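    // Inner loop over K: broadcast one element from each A row and fuse a
    // multiply-add against the same 16-wide vector of packed weights.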
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a0), vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a1), vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a2), vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a3), vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a4), vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a5), vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
      vacc6x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a6), vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
      vacc7x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a7), vb0123456789ABCDEF, vacc7x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;
      a6 += 1;
      a7 += 1;

      k -= sizeof(float);
    } while (k != 0);

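    // Clamp the accumulators to [min, max]. The 4-element params arrays are
    // broadcast across all four 128-bit lanes of each 512-bit vector.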
    const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);
    vacc6x0123456789ABCDEF = _mm512_min_ps(vacc6x0123456789ABCDEF, vmax);
    vacc7x0123456789ABCDEF = _mm512_min_ps(vacc7x0123456789ABCDEF, vmax);

    const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);
    vacc6x0123456789ABCDEF = _mm512_max_ps(vacc6x0123456789ABCDEF, vmin);
    vacc7x0123456789ABCDEF = _mm512_max_ps(vacc7x0123456789ABCDEF, vmin);

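    // Main store path: a full 16-column tile remains, so store every row with
    // unmasked unaligned stores and advance the C pointers to the next tile.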
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

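      // Rewind the A pointers by kc bytes so the same input rows are replayed
      // against the next 16-column block of packed weights.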
      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
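      // Remainder path: fewer than 16 columns are left, so store only the
      // low nc lanes of each row under a write mask.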
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}
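
// A minimal, hypothetical invocation sketch (illustration only; not part of
// the generated kernel). It assumes `packed_w` was laid out to match the
// loads above: per 16-column block, 16 bias floats followed by one 16-float
// weight group per K element, 64-byte aligned for _mm512_load_ps. All stride
// and kc arguments are in bytes; the names and sizes below are made up.
//
//   float a[8][192];                            // activations, row-major
//   float c[8][16];                             // output tile
//   float packed_w[16 + 192 * 16] __attribute__((aligned(64)));
//   union xnn_f32_output_params params;         // min/max filled in elsewhere
//   xnn_f32_gemm_ukernel_8x16__avx512f_broadcast(
//       8, 16, 192 * sizeof(float),             // mr, nc, kc (bytes)
//       &a[0][0], 192 * sizeof(float),          // a, a_stride (bytes)
//       packed_w,
//       &c[0][0], 16 * sizeof(float),           // c, cm_stride (bytes)
//       16 * sizeof(float), &params);           // cn_stride (bytes), params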