// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c8-neon-mull-padal.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


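// IGEMM microkernel: computes a 2x8 tile of the output (MR=2 rows, NR=8 channels)
// of an indirect GEMM over signed 8-bit inputs with per-channel (qc8) quantization
// scales, fp32 requantization, and min/max output clamping, using NEON widening
// multiplies (MULL/MLAL) with pairwise add-accumulate (PADAL).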
void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal_padal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (2 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 2) {
    c1 = c0;
  }
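  // When mr == 1, c1 aliases c0: row 1 is stored first and then overwritten by row 0.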
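  // Outer loop: produce one 2x8 output tile per iteration over the output channels.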
  do {
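    // Initialize the row-0 accumulators with the per-channel bias values packed at the start of w.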
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
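    // Row 1 starts from the same per-channel bias values as row 0.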
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;

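    // Walk the indirection buffer: ks counts its size in bytes, and each step consumes one pointer per row (2 per step).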
    size_t p = ks;
    do {
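      // Row pointers equal to `zero` address the zero-padding buffer and must not be offset.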
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      a += 2;

      size_t k = kc;
      // 2x partially unrolled loop: load 16 bytes of each row at a time and accumulate with MLAL.
      while (k >= 16 * sizeof(int8_t)) {
        const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;

        const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

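        // Per output channel: widening multiply of the first 8 K elements, fused
        // multiply-add of the next 8, then pairwise add-accumulate into int32 lanes.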
        const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
        int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0);
        vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
        vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
        int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0);
        vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
        vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
        int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0);
        vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
        vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
        int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0);
        vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
        vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
        int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0);
        vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
        vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
        int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
        vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
        vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
        int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0);
        vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
        vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
        int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0);
        vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
        vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);

        k -= 16 * sizeof(int8_t);
      }

      // Remainder: kc is rounded up to a multiple of 8, so at most 8 bytes are
      // left; handle them with a single widening multiply (MULL, no MLAL).
      if (k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;

        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);

        k -= 8 * sizeof(int8_t);
      }

      p -= 2 * sizeof(void*);
    } while (p != 0);

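    // Horizontally reduce the eight 4-lane accumulators of each row into two
    // int32x4 vectors holding one 32-bit sum per output channel.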
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
#endif

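    // fp32 requantization: convert to float, apply per-channel scales, clamp,
    // then convert back to a quantized int8 value via the magic-bias trick.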
    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);

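    // qc8: per-channel scales are packed in w immediately after this column block's weights.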
    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);

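    // Clamp in the float domain; the bounds already have the output zero point subtracted.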
    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);

    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);

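    // Magic-bias rounding: adding the large magic bias constant rounds to nearest and
    // leaves the integer result in the low mantissa bits; reinterpreting as int32 and
    // subtracting magic_bias_less_zero_point recovers the value with the zero point applied.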
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));

    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);

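    // Narrow int32 -> int16 -> int8; the float clamping above already bounded the values.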
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));

    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
#else
    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
#endif

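    // Store the 2x8 tile; for a partial tile, write 4-, 2-, and 1-byte pieces.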
    if (nc >= 8) {
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}