// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

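// Computes one 4x8 tile of C per outer-loop iteration, starting from
// previously computed partial sums: unlike the plain GEMM microkernel, this
// "gemminc" variant seeds its accumulators from the `acc` buffer instead of
// bias values, finishes the reduction over `kc` bytes of K, applies the
// min/max clamp, and writes the tile to C. It is intended for GEMMs whose
// reduction over K is split across multiple passes.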
void xnn_f32_gemminc_ukernel_4x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

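  // Set up one A (input) and one C (output) pointer per row of the tile.
  // When mr < 4, the pointers of the missing rows alias the previous valid
  // row, so those rows compute redundant results into in-bounds memory
  // instead of reading or writing out of range.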
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

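  // Main loop over the N dimension, 8 output columns at a time. Each
  // iteration seeds the 4x8 accumulator tile with 32 partial sums from
  // `acc`, which must be 32-byte aligned for the aligned loads below.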
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
    acc += 32;

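    // Multiply-accumulate over K, one element of each A row per iteration:
    // broadcast a single scalar from each row of A to all 8 lanes, load the
    // next 8 packed weights from `w`, and issue one FMA per row. `kc` is
    // counted in bytes, hence the `k -= sizeof(float)` step.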
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);

      k -= sizeof(float);
    } while (k != 0);

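    // Clamp the accumulators to the [min, max] output range from `params`.
    // Each bound is loaded as a 128-bit vector and broadcast to both halves
    // of a 256-bit register.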
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);

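    // Store the tile. The common case writes full 8-column rows (from c3 down
    // to c0, so rows aliased by the mr < 4 clamping above are overwritten by
    // the genuine row last) and rewinds the A pointers by kc bytes for the
    // next block of columns. The remainder path narrows to 128-bit halves and
    // stores 4, 2, and 1 columns according to the low bits of nc.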
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
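
// A minimal usage sketch (illustrative only, not part of the generated file),
// assuming a GEMM whose reduction over K is split into two passes: a first
// pass fills `acc` with partial sums for a 4x8 tile, and this kernel finishes
// the reduction and writes the clamped tile to C. The names `w_lo`, `w_hi`,
// `K`, and `N` are hypothetical; `w` must point to packed 8-column panels of
// B for the covered K range, `acc` must be 32-byte aligned, all strides and
// `kc` are in bytes, and `params` holds the output min/max bounds.
//
//   XNN_ALIGN(32) float acc[4 * 8];  // partial sums for one 4x8 tile
//   // ... first pass over a[0:K/2] and w_lo writes its results into acc ...
//   xnn_f32_gemminc_ukernel_4x8__fma3_broadcast(
//       /*mr=*/4, /*nc=*/8, /*kc=*/(K / 2) * sizeof(float),
//       /*a=*/a + K / 2, /*a_stride=*/K * sizeof(float),
//       /*w=*/w_hi, /*c=*/c, /*cm_stride=*/N * sizeof(float),
//       /*cn_stride=*/8 * sizeof(float), /*acc=*/acc, &params);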