// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRxNRc4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


// This kernel uses ARMv8.2 dot-product instructions.
//
// Scalar model: xnn_qs8_gemm_minmax_ukernel_8x16c4__scalar. Refer to
// that kernel for more comments.
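//
// Parameter summary (editorial annotation, following the conventions of
// other XNNPACK GEMM microkernels):
//   mr        - number of rows of A and C to process (1..8)
//   nc        - number of output columns remaining
//   kc        - length of the reduction dimension, in bytes of int8_t
//   a         - activations: mr rows of kc bytes, a_stride bytes apart
//   w         - packed weights: for each group of 16 columns, 16 int32_t bias
//               values followed by int8_t weights in groups of 4 along kc
//   c         - output: mr rows, cm_stride bytes apart; cn_stride advances a
//               row pointer to its next group of 16 columns
//   params    - quantization parameters (multiplier, shift, zero point, clamps)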
void xnn_qs8_gemm_minmax_ukernel_8x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN {
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride);
  int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride);
  int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const int8_t* a6 = (const int8_t*) ((uintptr_t) a5 + a_stride);
  int8_t* c6 = (int8_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const int8_t* a7 = (const int8_t*) ((uintptr_t) a6 + a_stride);
  int8_t* c7 = (int8_t*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }
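  // Editorial note: when mr < 8, the pointers for the missing rows alias the
  // previous valid row, so every load and store below stays in bounds; the
  // duplicated rows simply recompute and rewrite the same data. This avoids
  // per-row branches in the hot loop.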

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc1x89AB = vacc0x89AB;
    int32x4_t vacc1xCDEF = vacc0xCDEF;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc2x89AB = vacc0x89AB;
    int32x4_t vacc2xCDEF = vacc0xCDEF;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc3x89AB = vacc0x89AB;
    int32x4_t vacc3xCDEF = vacc0xCDEF;
    int32x4_t vacc4x0123 = vacc0x0123;
    int32x4_t vacc4x4567 = vacc0x4567;
    int32x4_t vacc4x89AB = vacc0x89AB;
    int32x4_t vacc4xCDEF = vacc0xCDEF;
    int32x4_t vacc5x0123 = vacc0x0123;
    int32x4_t vacc5x4567 = vacc0x4567;
    int32x4_t vacc5x89AB = vacc0x89AB;
    int32x4_t vacc5xCDEF = vacc0xCDEF;
    int32x4_t vacc6x0123 = vacc0x0123;
    int32x4_t vacc6x4567 = vacc0x4567;
    int32x4_t vacc6x89AB = vacc0x89AB;
    int32x4_t vacc6xCDEF = vacc0xCDEF;
    int32x4_t vacc7x0123 = vacc0x0123;
    int32x4_t vacc7x4567 = vacc0x4567;
    int32x4_t vacc7x89AB = vacc0x89AB;
    int32x4_t vacc7xCDEF = vacc0xCDEF;
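    // The bias is per output column, so rows 1..7 start from copies of the
    // row-0 accumulators instead of reloading it.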

    // Inner accumulation loop along the reduction dimension (`kc`); all 16
    // columns of this group are accumulated in parallel.
    size_t k = kc;
    // 2x partially unrolled loop that consumes 8 bytes of each row per iteration.
    while (k >= 8 * sizeof(int8_t)) {
      // Load an 8x8 block of activations.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
      const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8;
      const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8;
      const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 8;
      const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 8;

      // Load an 8x16 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);

      // Multiply-accumulate: 8x8 * 8x16 --> 8x16.
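      // Editorial note: vdotq_lane_s32(acc, b, a, l) updates output lane i as
      //   acc[i] += dot(b[4*i .. 4*i+3], a[4*l .. 4*l+3])
      // with int8 inputs widened to int32. A scalar model of the first update
      // below:
      //   for (size_t i = 0; i < 4; i++) {
      //     for (size_t j = 0; j < 4; j++) {
      //       vacc0x0123[i] += (int32_t) vb0123x0123[4*i + j] * (int32_t) va0x01234567[j];
      //     }
      //   }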
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
      vacc4x89AB = vdotq_lane_s32(vacc4x89AB, vb0123x89AB, va4x01234567, 0);
      vacc4xCDEF = vdotq_lane_s32(vacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
      vacc5x89AB = vdotq_lane_s32(vacc5x89AB, vb0123x89AB, va5x01234567, 0);
      vacc5xCDEF = vdotq_lane_s32(vacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
      vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
      vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
      vacc6x89AB = vdotq_lane_s32(vacc6x89AB, vb0123x89AB, va6x01234567, 0);
      vacc6xCDEF = vdotq_lane_s32(vacc6xCDEF, vb0123xCDEF, va6x01234567, 0);
      vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
      vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
      vacc7x89AB = vdotq_lane_s32(vacc7x89AB, vb0123x89AB, va7x01234567, 0);
      vacc7xCDEF = vdotq_lane_s32(vacc7xCDEF, vb0123xCDEF, va7x01234567, 0);
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
      vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
      vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
      vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
      vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1);
      vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
      vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb4567x89AB, va3x01234567, 1);
      vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
      vacc4x89AB = vdotq_lane_s32(vacc4x89AB, vb4567x89AB, va4x01234567, 1);
      vacc4xCDEF = vdotq_lane_s32(vacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);
      vacc5x89AB = vdotq_lane_s32(vacc5x89AB, vb4567x89AB, va5x01234567, 1);
      vacc5xCDEF = vdotq_lane_s32(vacc5xCDEF, vb4567xCDEF, va5x01234567, 1);
      vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb4567x0123, va6x01234567, 1);
      vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb4567x4567, va6x01234567, 1);
      vacc6x89AB = vdotq_lane_s32(vacc6x89AB, vb4567x89AB, va6x01234567, 1);
      vacc6xCDEF = vdotq_lane_s32(vacc6xCDEF, vb4567xCDEF, va6x01234567, 1);
      vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb4567x0123, va7x01234567, 1);
      vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb4567x4567, va7x01234567, 1);
      vacc7x89AB = vdotq_lane_s32(vacc7x89AB, vb4567x89AB, va7x01234567, 1);
      vacc7xCDEF = vdotq_lane_s32(vacc7xCDEF, vb4567xCDEF, va7x01234567, 1);

      k -= 8 * sizeof(int8_t);
    }
    // Handle the final 1..7 positions of `k`.
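    // Editorial note: the loads below fetch a full 8 bytes per row even though
    // only k < 8 bytes remain. This appears to rely on the packed weights
    // being zero-padded along kc to a multiple of 4, so that activation bytes
    // past position k are multiplied by zero weights, and on the caller
    // providing the usual XNNPACK over-read slack past each row of `a`.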
    if XNN_UNLIKELY(k != 0) {
      // Load an 8x8 block of activations; only the first k bytes of each row
      // are meaningful.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += k;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += k;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += k;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += k;
      const int8x8_t va4x01234567 = vld1_s8(a4); a4 += k;
      const int8x8_t va5x01234567 = vld1_s8(a5); a5 += k;
      const int8x8_t va6x01234567 = vld1_s8(a6); a6 += k;
      const int8x8_t va7x01234567 = vld1_s8(a7); a7 += k;

      // Load a 4x16 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
      const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);

      // Multiply-accumulate: 8x4 * 4x16 --> 8x16.
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
      vacc4x89AB = vdotq_lane_s32(vacc4x89AB, vb0123x89AB, va4x01234567, 0);
      vacc4xCDEF = vdotq_lane_s32(vacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
      vacc5x89AB = vdotq_lane_s32(vacc5x89AB, vb0123x89AB, va5x01234567, 0);
      vacc5xCDEF = vdotq_lane_s32(vacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
      vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
      vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
      vacc6x89AB = vdotq_lane_s32(vacc6x89AB, vb0123x89AB, va6x01234567, 0);
      vacc6xCDEF = vdotq_lane_s32(vacc6xCDEF, vb0123xCDEF, va6x01234567, 0);
      vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
      vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
      vacc7x89AB = vdotq_lane_s32(vacc7x89AB, vb0123x89AB, va7x01234567, 0);
      vacc7xCDEF = vdotq_lane_s32(vacc7xCDEF, vb0123xCDEF, va7x01234567, 0);

      if (k > 4) {
        // Load a 4x16 block of weights.
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
        const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);
        const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const void*)((const int8_t*)w + 16);

        // Multiply-accumulate: 8x4 * 4x16 --> 8x16.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
        vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
        vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
        vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1);
        vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
        vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb4567x89AB, va3x01234567, 1);
        vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
        vacc4x89AB = vdotq_lane_s32(vacc4x89AB, vb4567x89AB, va4x01234567, 1);
        vacc4xCDEF = vdotq_lane_s32(vacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);
        vacc5x89AB = vdotq_lane_s32(vacc5x89AB, vb4567x89AB, va5x01234567, 1);
        vacc5xCDEF = vdotq_lane_s32(vacc5xCDEF, vb4567xCDEF, va5x01234567, 1);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb4567x0123, va6x01234567, 1);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb4567x4567, va6x01234567, 1);
        vacc6x89AB = vdotq_lane_s32(vacc6x89AB, vb4567x89AB, va6x01234567, 1);
        vacc6xCDEF = vdotq_lane_s32(vacc6xCDEF, vb4567xCDEF, va6x01234567, 1);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb4567x0123, va7x01234567, 1);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb4567x4567, va7x01234567, 1);
        vacc7x89AB = vdotq_lane_s32(vacc7x89AB, vb4567x89AB, va7x01234567, 1);
        vacc7xCDEF = vdotq_lane_s32(vacc7xCDEF, vb4567xCDEF, va7x01234567, 1);
      }
    }
    // End of accumulation loop. Each `a` pointer was advanced by `kc` in
    // total, so rewind them by that amount for the next group of columns.
    a0 = (const int8_t*)((uintptr_t)a0 - kc);
    a1 = (const int8_t*)((uintptr_t)a1 - kc);
    a2 = (const int8_t*)((uintptr_t)a2 - kc);
    a3 = (const int8_t*)((uintptr_t)a3 - kc);
    a4 = (const int8_t*)((uintptr_t)a4 - kc);
    a5 = (const int8_t*)((uintptr_t)a5 - kc);
    a6 = (const int8_t*)((uintptr_t)a6 - kc);
    a7 = (const int8_t*)((uintptr_t)a7 - kc);

    // Post-accumulation work: requantize the 32-bit accumulators to int8.
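    // Editorial sketch of the requantization scheme below (gemmlowp-style Q31
    // requantization, assuming a multiplier in [2^30, 2^31)):
    //   1. vqrdmulhq_n_s32 computes the rounding doubling high product,
    //      roughly (int32_t) ((acc * (int64_t) multiplier + (1 << 30)) >> 31);
    //   2. vsraq/vbic nudge negative values so the final shift rounds
    //      half-away-from-zero (see the note before that block);
    //   3. vrshlq_s32 with the negative right_shift performs a rounding
    //      arithmetic shift right.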

    const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
    const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));

    const int32x4_t vproduct0x0123 = vqrdmulhq_n_s32(vacc0x0123, params->neon.multiplier);
    const int32x4_t vproduct0x4567 = vqrdmulhq_n_s32(vacc0x4567, params->neon.multiplier);
    const int32x4_t vproduct0x89AB = vqrdmulhq_n_s32(vacc0x89AB, params->neon.multiplier);
    const int32x4_t vproduct0xCDEF = vqrdmulhq_n_s32(vacc0xCDEF, params->neon.multiplier);
    const int32x4_t vproduct1x0123 = vqrdmulhq_n_s32(vacc1x0123, params->neon.multiplier);
    const int32x4_t vproduct1x4567 = vqrdmulhq_n_s32(vacc1x4567, params->neon.multiplier);
    const int32x4_t vproduct1x89AB = vqrdmulhq_n_s32(vacc1x89AB, params->neon.multiplier);
    const int32x4_t vproduct1xCDEF = vqrdmulhq_n_s32(vacc1xCDEF, params->neon.multiplier);
    const int32x4_t vproduct2x0123 = vqrdmulhq_n_s32(vacc2x0123, params->neon.multiplier);
    const int32x4_t vproduct2x4567 = vqrdmulhq_n_s32(vacc2x4567, params->neon.multiplier);
    const int32x4_t vproduct2x89AB = vqrdmulhq_n_s32(vacc2x89AB, params->neon.multiplier);
    const int32x4_t vproduct2xCDEF = vqrdmulhq_n_s32(vacc2xCDEF, params->neon.multiplier);
    const int32x4_t vproduct3x0123 = vqrdmulhq_n_s32(vacc3x0123, params->neon.multiplier);
    const int32x4_t vproduct3x4567 = vqrdmulhq_n_s32(vacc3x4567, params->neon.multiplier);
    const int32x4_t vproduct3x89AB = vqrdmulhq_n_s32(vacc3x89AB, params->neon.multiplier);
    const int32x4_t vproduct3xCDEF = vqrdmulhq_n_s32(vacc3xCDEF, params->neon.multiplier);
    const int32x4_t vproduct4x0123 = vqrdmulhq_n_s32(vacc4x0123, params->neon.multiplier);
    const int32x4_t vproduct4x4567 = vqrdmulhq_n_s32(vacc4x4567, params->neon.multiplier);
    const int32x4_t vproduct4x89AB = vqrdmulhq_n_s32(vacc4x89AB, params->neon.multiplier);
    const int32x4_t vproduct4xCDEF = vqrdmulhq_n_s32(vacc4xCDEF, params->neon.multiplier);
    const int32x4_t vproduct5x0123 = vqrdmulhq_n_s32(vacc5x0123, params->neon.multiplier);
    const int32x4_t vproduct5x4567 = vqrdmulhq_n_s32(vacc5x4567, params->neon.multiplier);
    const int32x4_t vproduct5x89AB = vqrdmulhq_n_s32(vacc5x89AB, params->neon.multiplier);
    const int32x4_t vproduct5xCDEF = vqrdmulhq_n_s32(vacc5xCDEF, params->neon.multiplier);
    const int32x4_t vproduct6x0123 = vqrdmulhq_n_s32(vacc6x0123, params->neon.multiplier);
    const int32x4_t vproduct6x4567 = vqrdmulhq_n_s32(vacc6x4567, params->neon.multiplier);
    const int32x4_t vproduct6x89AB = vqrdmulhq_n_s32(vacc6x89AB, params->neon.multiplier);
    const int32x4_t vproduct6xCDEF = vqrdmulhq_n_s32(vacc6xCDEF, params->neon.multiplier);
    const int32x4_t vproduct7x0123 = vqrdmulhq_n_s32(vacc7x0123, params->neon.multiplier);
    const int32x4_t vproduct7x4567 = vqrdmulhq_n_s32(vacc7x4567, params->neon.multiplier);
    const int32x4_t vproduct7x89AB = vqrdmulhq_n_s32(vacc7x89AB, params->neon.multiplier);
    const int32x4_t vproduct7xCDEF = vqrdmulhq_n_s32(vacc7xCDEF, params->neon.multiplier);

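    // Editorial note: each vsraq_n_s32 below computes
    //   vproduct + ((vacc < 0 && right_shift != 0) ? -1 : 0),
    // testing the sign on the pre-multiplication accumulator (the positive
    // Q31 multiplier preserves sign, so this matches the product's sign).
    // Subtracting one from negative values turns the rounding shift's
    // round-half-up behavior into round-half-away-from-zero.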
    vacc0x0123 = vsraq_n_s32(vproduct0x0123, vbicq_s32(vacc0x0123, vzero_shift_mask), 31);
    vacc0x4567 = vsraq_n_s32(vproduct0x4567, vbicq_s32(vacc0x4567, vzero_shift_mask), 31);
    vacc0x89AB = vsraq_n_s32(vproduct0x89AB, vbicq_s32(vacc0x89AB, vzero_shift_mask), 31);
    vacc0xCDEF = vsraq_n_s32(vproduct0xCDEF, vbicq_s32(vacc0xCDEF, vzero_shift_mask), 31);
    vacc1x0123 = vsraq_n_s32(vproduct1x0123, vbicq_s32(vacc1x0123, vzero_shift_mask), 31);
    vacc1x4567 = vsraq_n_s32(vproduct1x4567, vbicq_s32(vacc1x4567, vzero_shift_mask), 31);
    vacc1x89AB = vsraq_n_s32(vproduct1x89AB, vbicq_s32(vacc1x89AB, vzero_shift_mask), 31);
    vacc1xCDEF = vsraq_n_s32(vproduct1xCDEF, vbicq_s32(vacc1xCDEF, vzero_shift_mask), 31);
    vacc2x0123 = vsraq_n_s32(vproduct2x0123, vbicq_s32(vacc2x0123, vzero_shift_mask), 31);
    vacc2x4567 = vsraq_n_s32(vproduct2x4567, vbicq_s32(vacc2x4567, vzero_shift_mask), 31);
    vacc2x89AB = vsraq_n_s32(vproduct2x89AB, vbicq_s32(vacc2x89AB, vzero_shift_mask), 31);
    vacc2xCDEF = vsraq_n_s32(vproduct2xCDEF, vbicq_s32(vacc2xCDEF, vzero_shift_mask), 31);
    vacc3x0123 = vsraq_n_s32(vproduct3x0123, vbicq_s32(vacc3x0123, vzero_shift_mask), 31);
    vacc3x4567 = vsraq_n_s32(vproduct3x4567, vbicq_s32(vacc3x4567, vzero_shift_mask), 31);
    vacc3x89AB = vsraq_n_s32(vproduct3x89AB, vbicq_s32(vacc3x89AB, vzero_shift_mask), 31);
    vacc3xCDEF = vsraq_n_s32(vproduct3xCDEF, vbicq_s32(vacc3xCDEF, vzero_shift_mask), 31);
    vacc4x0123 = vsraq_n_s32(vproduct4x0123, vbicq_s32(vacc4x0123, vzero_shift_mask), 31);
    vacc4x4567 = vsraq_n_s32(vproduct4x4567, vbicq_s32(vacc4x4567, vzero_shift_mask), 31);
    vacc4x89AB = vsraq_n_s32(vproduct4x89AB, vbicq_s32(vacc4x89AB, vzero_shift_mask), 31);
    vacc4xCDEF = vsraq_n_s32(vproduct4xCDEF, vbicq_s32(vacc4xCDEF, vzero_shift_mask), 31);
    vacc5x0123 = vsraq_n_s32(vproduct5x0123, vbicq_s32(vacc5x0123, vzero_shift_mask), 31);
    vacc5x4567 = vsraq_n_s32(vproduct5x4567, vbicq_s32(vacc5x4567, vzero_shift_mask), 31);
    vacc5x89AB = vsraq_n_s32(vproduct5x89AB, vbicq_s32(vacc5x89AB, vzero_shift_mask), 31);
    vacc5xCDEF = vsraq_n_s32(vproduct5xCDEF, vbicq_s32(vacc5xCDEF, vzero_shift_mask), 31);
    vacc6x0123 = vsraq_n_s32(vproduct6x0123, vbicq_s32(vacc6x0123, vzero_shift_mask), 31);
    vacc6x4567 = vsraq_n_s32(vproduct6x4567, vbicq_s32(vacc6x4567, vzero_shift_mask), 31);
    vacc6x89AB = vsraq_n_s32(vproduct6x89AB, vbicq_s32(vacc6x89AB, vzero_shift_mask), 31);
    vacc6xCDEF = vsraq_n_s32(vproduct6xCDEF, vbicq_s32(vacc6xCDEF, vzero_shift_mask), 31);
    vacc7x0123 = vsraq_n_s32(vproduct7x0123, vbicq_s32(vacc7x0123, vzero_shift_mask), 31);
    vacc7x4567 = vsraq_n_s32(vproduct7x4567, vbicq_s32(vacc7x4567, vzero_shift_mask), 31);
    vacc7x89AB = vsraq_n_s32(vproduct7x89AB, vbicq_s32(vacc7x89AB, vzero_shift_mask), 31);
    vacc7xCDEF = vsraq_n_s32(vproduct7xCDEF, vbicq_s32(vacc7xCDEF, vzero_shift_mask), 31);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_shift);
    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_shift);
    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_shift);
    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_shift);
    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_shift);
    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_shift);
    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_shift);
    vacc6x89AB = vrshlq_s32(vacc6x89AB, vright_shift);
    vacc6xCDEF = vrshlq_s32(vacc6xCDEF, vright_shift);
    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_shift);
    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_shift);
    vacc7x89AB = vrshlq_s32(vacc7x89AB, vright_shift);
    vacc7xCDEF = vrshlq_s32(vacc7xCDEF, vright_shift);

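    // Narrow 32-bit -> 16-bit with saturation, add the output zero point in
    // the 16-bit domain, then narrow 16-bit -> 8-bit with saturation. AArch64
    // uses vqmovn_high to fill both halves of a register in one step; AArch32
    // combines two vqmovn results instead.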
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
    const int16x8_t vacc6x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x89AB), vacc6xCDEF), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);
    const int16x8_t vacc7x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x89AB), vacc7xCDEF), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
    int8x16_t vout4x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc4x89ABCDEF);
    int8x16_t vout5x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc5x01234567), vacc5x89ABCDEF);
    int8x16_t vout6x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc6x01234567), vacc6x89ABCDEF);
    int8x16_t vout7x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc7x01234567), vacc7x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
    const int16x8_t vacc6x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x89AB), vqmovn_s32(vacc6xCDEF)), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);
    const int16x8_t vacc7x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x89AB), vqmovn_s32(vacc7xCDEF)), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
    int8x16_t vout4x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc4x89ABCDEF));
    int8x16_t vout5x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc5x01234567), vqmovn_s16(vacc5x89ABCDEF));
    int8x16_t vout6x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc6x01234567), vqmovn_s16(vacc6x89ABCDEF));
    int8x16_t vout7x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc7x01234567), vqmovn_s16(vacc7x89ABCDEF));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

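    // Clamp the outputs to the requested [output_min, output_max] range.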
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
    vout4x0123456789ABCDEF = vmaxq_s8(vout4x0123456789ABCDEF, voutput_min);
    vout5x0123456789ABCDEF = vmaxq_s8(vout5x0123456789ABCDEF, voutput_min);
    vout6x0123456789ABCDEF = vmaxq_s8(vout6x0123456789ABCDEF, voutput_min);
    vout7x0123456789ABCDEF = vmaxq_s8(vout7x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
    vout4x0123456789ABCDEF = vminq_s8(vout4x0123456789ABCDEF, voutput_max);
    vout5x0123456789ABCDEF = vminq_s8(vout5x0123456789ABCDEF, voutput_max);
    vout6x0123456789ABCDEF = vminq_s8(vout6x0123456789ABCDEF, voutput_max);
    vout7x0123456789ABCDEF = vminq_s8(vout7x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Main case where the 16 columns fit in the destination.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
      vst1q_s8(c4 + 0, vout4x0123456789ABCDEF);
      vst1q_s8(c5 + 0, vout5x0123456789ABCDEF);
      vst1q_s8(c6 + 0, vout6x0123456789ABCDEF);
      vst1q_s8(c7 + 0, vout7x0123456789ABCDEF);

      // Advance to the next 16 columns.
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (int8_t*) ((uintptr_t) c4 + cn_stride);
      c5 = (int8_t*) ((uintptr_t) c5 + cn_stride);
      c6 = (int8_t*) ((uintptr_t) c6 + cn_stride);
      c7 = (int8_t*) ((uintptr_t) c7 + cn_stride);

      nc -= 16;
    } else {
      // Final case where not all of the 16 columns fit in the destination.
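      // The low (or, after the 8-column step, high) halves of two adjacent
      // rows are packed into one 128-bit register so each partial-width store
      // below writes two rows via lane stores; vext shifts the remaining
      // bytes down after every step.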
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
      int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vget_low_s8(vout4x0123456789ABCDEF), vget_low_s8(vout5x0123456789ABCDEF));
      int8x16_t vout6x01234567_7x01234567 = vcombine_s8(vget_low_s8(vout6x0123456789ABCDEF), vget_low_s8(vout7x0123456789ABCDEF));
      if (nc & 8) {
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
        vst1_s8(c4, vget_low_s8(vout4x01234567_5x01234567)); c4 += 8;
        vst1_s8(c5, vget_high_s8(vout4x01234567_5x01234567)); c5 += 8;
        vst1_s8(c6, vget_low_s8(vout6x01234567_7x01234567)); c6 += 8;
        vst1_s8(c7, vget_high_s8(vout6x01234567_7x01234567)); c7 += 8;
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
        vout4x01234567_5x01234567 = vcombine_s8(vget_high_s8(vout4x0123456789ABCDEF), vget_high_s8(vout5x0123456789ABCDEF));
        vout6x01234567_7x01234567 = vcombine_s8(vget_high_s8(vout6x0123456789ABCDEF), vget_high_s8(vout7x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_s8(vout6x01234567_7x01234567), 0); c6 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_s8(vout6x01234567_7x01234567), 2); c7 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c6, 1), vreinterpretq_u16_s8(vout6x01234567_7x01234567), 0); c6 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c7, 1), vreinterpretq_u16_s8(vout6x01234567_7x01234567), 4); c7 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_s8(c6, vout6x01234567_7x01234567, 0);
        vst1q_lane_s8(c7, vout6x01234567_7x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}