// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemminc_ukernel_1x8__sse_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
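    // GEMMINC variant: seed the accumulators from the caller-provided acc buffer instead of a bias.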
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    acc += 8;

    size_t k = kc;
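    // Main loop: process 4 elements of K per iteration; each lane of the A vector is broadcast
    // ("dup"-ed) with a shuffle and multiplied against 8 packed B values.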
    while (k >= 4 * sizeof(float)) {
      const __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;


      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));

      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));

      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));

      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
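    // Remainder: handle kc values that are not a multiple of 4, one element of K at a time.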
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

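    // Clamp the accumulators to the [min, max] output range from params.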
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

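    // Store the 8-wide output tile; for the final columns (nc < 8), store 4/2/1 elements at a time.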
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}