// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

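// GEMM microkernel: computes an up-to-4-row, nc-column block of
// C = A * B + bias in 8-column steps, clamping results to the [min, max]
// range in `params`. The bias and B panels are pre-packed together in `w`;
// `kc` is the reduction size in bytes.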
void xnn_f32_gemm_ukernel_4x8__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

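  // Set up one A read pointer and one C write pointer per row. Rows beyond
  // `mr` are aliased to the previous row, so they recompute and rewrite the
  // same data: short tiles are handled without branches in the hot loop.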
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
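    // Initialize all four row accumulators from the packed bias (the first
    // 8 floats of each column block in `w`).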
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    w += 8;

    size_t k = kc;
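    // Inner loop over K: `kc` is in bytes, so `k` steps down by sizeof(float)
    // per iteration. Each step broadcasts one A element per row and
    // multiplies it by one 8-wide row of packed B.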
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
      vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
      vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
      vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));

      k -= sizeof(float);
    } while (k != 0);

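    // Clamp the accumulators to the [min, max] output range from `params`
    // (min against the upper bound first, then max against the lower bound).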
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);

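    // Full-width store: write all 8 columns for each row, advance the C
    // pointers to the next column block, and rewind the A pointers by `kc`
    // bytes so the same rows can be re-read for the next block.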
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
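      // Tail store for the last 1-7 columns: write 4, 2, and 1 elements as
      // needed, narrowing from the low 128-bit half of each accumulator.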
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
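      // After a 2-element store, shift the remaining high pair of lanes down
      // with MOVHLPS so a final 1-element store picks up the right value.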
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
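      // Final single-element store for an odd column count.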
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}