// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>

// f32 depthwise-convolution micro-kernel: 4 kernel taps per output pixel,
// channels processed up to 8 at a time with SSE (hence "up8x4__sse").
//
// Arguments:
//   channels         - number of channels per output pixel (non-zero).
//   output_width     - number of output pixels to produce (non-zero).
//   input            - per-pixel array of 4 input-row pointers (one per kernel
//                      tap); the array pointer itself is advanced by
//                      input_stride bytes after each output pixel.
//   weights          - packed weights: for each group of 8 channels, 8 bias
//                      values followed by 4 kernel taps of 8 values each
//                      (40 floats per group). Loaded with _mm_load_ps, so the
//                      packed buffer must be 16-byte aligned.
//   output           - output buffer; advanced by output_increment extra bytes
//                      after each pixel (on top of the values stored).
//   params           - clamping parameters; params->sse.min / params->sse.max
//                      are pre-broadcast 4-element vectors (16-byte aligned).
void xnn_f32_dwconv_ukernel_up8x4__sse(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Output clamping bounds, hoisted out of the pixel loop.
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);
  do {
    // One input-row pointer per kernel tap for the current output pixel.
    const float* i0 = input[0];
    assert(i0 != NULL);
    const float* i1 = input[1];
    assert(i1 != NULL);
    const float* i2 = input[2];
    assert(i2 != NULL);
    const float* i3 = input[3];
    assert(i3 != NULL);
    // Step the indirection buffer to the next pixel's pointer quadruple.
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
    // Main loop: 8 channels per iteration, two 4-lane accumulators.
    for (; c >= 8; c -= 8) {
      // Accumulators start at the bias values (first 8 floats of the group).
      __m128 vacc0123p0 = _mm_load_ps(w);
      __m128 vacc4567p0 = _mm_load_ps(w + 4);


      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
      i0 += 8;

      // Tap 0 weights live at w+8..w+15 within the 40-float group.
      const __m128 vk0x0123 = _mm_load_ps(w + 8);
      const __m128 vk0x4567 = _mm_load_ps(w + 12);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));

      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
      i1 += 8;

      const __m128 vk1x0123 = _mm_load_ps(w + 16);
      const __m128 vk1x4567 = _mm_load_ps(w + 20);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));

      const __m128 vi2x0123 = _mm_loadu_ps(i2);
      const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
      i2 += 8;

      const __m128 vk2x0123 = _mm_load_ps(w + 24);
      const __m128 vk2x4567 = _mm_load_ps(w + 28);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));

      const __m128 vi3x0123 = _mm_loadu_ps(i3);
      const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
      i3 += 8;

      const __m128 vk3x0123 = _mm_load_ps(w + 32);
      const __m128 vk3x4567 = _mm_load_ps(w + 36);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));

      // Advance past one full packed group: 8 bias + 4*8 kernel values.
      w += 40;


      // Clamp to [vmin, vmax] and store 8 outputs.
      __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
      __m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
      vacc0123 = _mm_min_ps(vacc0123, vmax);
      vacc4567 = _mm_min_ps(vacc4567, vmax);

      _mm_storeu_ps(output, vacc0123);
      _mm_storeu_ps(output + 4, vacc4567);
      output += 8;
    }
    // Tail: 4 remaining channels (this loop runs at most once, since c < 8).
    // The trailing (channels % 8) channels are still packed as a full 8-wide
    // group, so bias is at w and tap t at w + 8*(t+1), same as above.
    for (; c >= 4; c -= 4) {
      __m128 vacc0123p0 = _mm_load_ps(w);

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 += 4;

      const __m128 vk0x0123 = _mm_load_ps(w + 8);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));

      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 += 4;

      const __m128 vk1x0123 = _mm_load_ps(w + 16);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));

      const __m128 vi2x0123 = _mm_loadu_ps(i2);
      i2 += 4;

      const __m128 vk2x0123 = _mm_load_ps(w + 24);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));

      const __m128 vi3x0123 = _mm_loadu_ps(i3);
      i3 += 4;

      const __m128 vk3x0123 = _mm_load_ps(w + 32);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));

      // Advance by only 4 lanes: the loads above consumed lanes 0-3 of the
      // 8-wide packed group, and after this increment the same offsets
      // (w, w+8, w+16, ...) address lanes 4-7 of bias/k0/k1/k2/k3 for the
      // final sub-4 remainder below.
      w += 4;


      __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
      vacc0123 = _mm_min_ps(vacc0123, vmax);

      _mm_storeu_ps(output, vacc0123);
      output += 4;
    }
    // Final 1-3 channels: compute a full 4-lane vector but store only c lanes.
    if XNN_UNLIKELY(c != 0) {
      __m128 vacc0123p0 = _mm_load_ps(w);

      // NOTE(review): these loads read a full 4 floats even when c < 4 —
      // presumably the caller guarantees readable padding past the tail of
      // each input row and the packed weights; confirm against the packing /
      // indirection-buffer setup code.
      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      const __m128 vk0x0123 = _mm_load_ps(w + 8);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));

      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      const __m128 vk1x0123 = _mm_load_ps(w + 16);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));

      const __m128 vi2x0123 = _mm_loadu_ps(i2);
      const __m128 vk2x0123 = _mm_load_ps(w + 24);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));

      const __m128 vi3x0123 = _mm_loadu_ps(i3);
      const __m128 vk3x0123 = _mm_load_ps(w + 32);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));


      __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
      vacc0123 = _mm_min_ps(vacc0123, vmax);

      // Store lanes 0-1, then shift lanes 2-3 down so a final single-lane
      // store handles the odd channel.
      if (c & 2) {
        _mm_storel_pi((__m64*) output, vacc0123);
        vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
        output += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vacc0123);
        output += 1;
      }
    }

    // Skip over the inter-pixel gap in the output buffer.
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}