// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


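// f32 GEMM microkernel producing a 1x8 output tile: one row of C (up to
// 8 columns per pass) as C = clamp(A * B + bias, params->sse.min/max).
// Per XNNPACK's weight-packing convention, w interleaves, for each group of
// 8 columns, an 8-entry bias block (used to initialize the accumulators)
// followed by that group's B values, 8 columns wide. The "dup" variant
// broadcasts each lane of the A vector with _mm_shuffle_ps instead of
// reloading scalars. Note that kc counts bytes, not elements.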
void xnn_f32_gemm_ukernel_1x8__sse_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const float* a0 = a;
  float* c0 = c;

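  // Each pass of the outer loop produces one 1x8 tile of C (nc counts the
  // remaining output columns).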
  do {
    __m128 vacc0x0123 = _mm_load_ps(w + 0);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    w += 8;

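    // Main loop: consume 4 elements of A (16 bytes of K) per iteration,
    // broadcasting each lane of va0 against a block of 8 packed B columns.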
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      const __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;

      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));

      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));

      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));

      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
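    // Remainder loop: handle up to 3 leftover K elements, broadcasting one
    // scalar of A per step.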
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }
102
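    // Clamp the accumulators to the [min, max] output range from params.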
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);

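    // Store the tile: the fast path writes all 8 columns at once; the tail
    // path below narrows to 4-, 2-, and 1-float stores based on nc.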
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

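      // Rewind A by kc bytes so the same row is reused for the next column block.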
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
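      // Partial tile (nc < 8): write 4, then 2, then 1 floats as the low bits
      // of nc dictate, shifting the surviving lanes down after each store.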
      if (nc & 4) {
        _mm_storeu_ps(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
141}