// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


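// GEMM microkernel producing a 1x8 tile of output: one row of A (mr == 1)
// times packed weights w, 8 columns of C at a time, with min/max clamping.
// In XNNPACK's naming, "s4" marks the shuffle variant: the inner loop covers
// 4 k-steps per iteration by rotating the A register between weight blocks.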
void xnn_f32_gemm_ukernel_1x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const float* a0 = a;
  float* c0 = c;

  do {
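    // Load the initial accumulators from w. In XNNPACK's packed-weight
    // layout the bias values for the 8-column tile come first, followed by
    // the shuffled weights consumed below.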
    __m128 vacc0x0123 = _mm_load_ps(w + 0);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    w += 8;

    size_t k = kc;
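    // Main loop: each iteration consumes 4 k-steps. Rather than broadcasting
    // one element of A per step, the full vector va0 is multiplied by a
    // correspondingly pre-shuffled weight block, then rotated by one lane
    // (_MM_SHUFFLE(0, 3, 2, 1)) so the next block sees the next k element.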
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;


      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));


      w += 32;
      k -= 4 * sizeof(float);
    }
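    // Remainder loop: when kc is not a multiple of 4 floats, process the
    // last 1-3 k-steps scalar-style, broadcasting one A element to all lanes.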
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

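    // Clamp the accumulators to the [min, max] output range from params.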
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

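    // Full-tile store: write all 8 outputs, step c0 to the next column tile,
    // and rewind a0 to the start of the row for the next iteration.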
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
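      // Partial-tile store: write 4, 2, then 1 value(s) according to the
      // bits of nc, moving the surviving lanes down after each store.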
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}