// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c8-neon-mull-padal.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

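// 4x8 c8 QS8 IGEMM microkernel with indirect (pointer-array) input addressing.
// Each output column keeps one int32x4 accumulator per row: 8 int8 K-elements
// are multiplied with MULL/MLAL into int16x8 products and folded in with a
// pairwise add-accumulate (PADAL); the 4 lanes of each accumulator are reduced
// to a single per-column sum only after the K loop. Results are then
// requantized with the gemmlowp scheme and clamped to the output range.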
void xnn_qs8_igemm_minmax_gemmlowp_ukernel_4x8c8__neon_mlal_padal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

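  // The kernel consumes K in multiples of 8 bytes, so kc is rounded up to 8;
  // the indirection entries and packed weights are assumed to be padded to
  // match. Row pointers past mr are aliased onto the previous row so a full
  // 4-row tile can always be written without going out of bounds.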
  kc = round_up_po2(kc, 8);
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
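    // The packed weights start with 8 int32 bias values, one per output column.
    // Each bias is loaded into lane 0 of the corresponding row-0 accumulator and
    // shared by rows 1-3; the remaining lanes start at zero.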
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    size_t p = ks;
    do {
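      // Fetch the next 4 A pointers from the indirection buffer. Pointers equal
      // to `zero` reference the shared zero buffer and are not offset by a_offset.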
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop, 2x partially unrolled: processes 16 bytes of K per row at a
      // time, combining the two 8-byte halves with MULL + MLAL before widening
      // into the int32 accumulators.
      while (k >= 16 * sizeof(int8_t)) {
        const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
        const int8x8_t va3x1 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
        int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0);
        int16x8_t vprod2x0 = vmull_s8(vb0x0, va2x0);
        int16x8_t vprod3x0 = vmull_s8(vb0x0, va3x0);
        vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
        vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1);
        vprod2x0 = vmlal_s8(vprod2x0, vb0x1, va2x1);
        vprod3x0 = vmlal_s8(vprod3x0, vb0x1, va3x1);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
        const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
        int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0);
        int16x8_t vprod2x1 = vmull_s8(vb1x0, va2x0);
        int16x8_t vprod3x1 = vmull_s8(vb1x0, va3x0);
        vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
        vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1);
        vprod2x1 = vmlal_s8(vprod2x1, vb1x1, va2x1);
        vprod3x1 = vmlal_s8(vprod3x1, vb1x1, va3x1);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
        const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
        int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0);
        int16x8_t vprod2x2 = vmull_s8(vb2x0, va2x0);
        int16x8_t vprod3x2 = vmull_s8(vb2x0, va3x0);
        vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
        vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1);
        vprod2x2 = vmlal_s8(vprod2x2, vb2x1, va2x1);
        vprod3x2 = vmlal_s8(vprod3x2, vb2x1, va3x1);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
        const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
        int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0);
        int16x8_t vprod2x3 = vmull_s8(vb3x0, va2x0);
        int16x8_t vprod3x3 = vmull_s8(vb3x0, va3x0);
        vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
        vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1);
        vprod2x3 = vmlal_s8(vprod2x3, vb3x1, va2x1);
        vprod3x3 = vmlal_s8(vprod3x3, vb3x1, va3x1);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
        const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
        int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0);
        int16x8_t vprod2x4 = vmull_s8(vb4x0, va2x0);
        int16x8_t vprod3x4 = vmull_s8(vb4x0, va3x0);
        vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
        vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);
        vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);
        vprod3x4 = vmlal_s8(vprod3x4, vb4x1, va3x1);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
        const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
        int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
        int16x8_t vprod2x5 = vmull_s8(vb5x0, va2x0);
        int16x8_t vprod3x5 = vmull_s8(vb5x0, va3x0);
        vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
        vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1);
        vprod2x5 = vmlal_s8(vprod2x5, vb5x1, va2x1);
        vprod3x5 = vmlal_s8(vprod3x5, vb5x1, va3x1);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
        const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
        int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0);
        int16x8_t vprod2x6 = vmull_s8(vb6x0, va2x0);
        int16x8_t vprod3x6 = vmull_s8(vb6x0, va3x0);
        vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
        vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1);
        vprod2x6 = vmlal_s8(vprod2x6, vb6x1, va2x1);
        vprod3x6 = vmlal_s8(vprod3x6, vb6x1, va3x1);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
        const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
        int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0);
        int16x8_t vprod2x7 = vmull_s8(vb7x0, va2x0);
        int16x8_t vprod3x7 = vmull_s8(vb7x0, va3x0);
        vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
        vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1);
        vprod2x7 = vmlal_s8(vprod2x7, vb7x1, va2x1);
        vprod3x7 = vmlal_s8(vprod3x7, vb7x1, va3x1);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
        vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

        k -= 16 * sizeof(int8_t);
      }

      // Remainder: handle the final 8 bytes of K using MULL only.
      if (k > 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
        const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
        const int16x8_t vprod3x0 = vmull_s8(vb0, va3);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
        const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
        const int16x8_t vprod3x1 = vmull_s8(vb1, va3);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
        const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
        const int16x8_t vprod3x2 = vmull_s8(vb2, va3);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
        const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
        const int16x8_t vprod3x3 = vmull_s8(vb3, va3);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
        const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
        const int16x8_t vprod3x4 = vmull_s8(vb4, va3);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
        const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
        const int16x8_t vprod3x5 = vmull_s8(vb5, va3);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
        const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
        const int16x8_t vprod3x6 = vmull_s8(vb6, va3);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
        const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
        const int16x8_t vprod3x7 = vmull_s8(vb7, va3);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
        vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

        k -= 8 * sizeof(int8_t);
      }

      p -= 4 * sizeof(void*);
    } while (p != 0);

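    // Reduce each per-column int32x4 accumulator to a single sum and pack the
    // results into one int32x4 per row and 4-column group. AArch64 uses VPADDQ;
    // AArch32 NEON falls back to VADD/VPADD/VCOMBINE.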
    #if XNN_ARCH_ARM64
      const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
      const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
      const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
      const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
      const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
      const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
      const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
      const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
      const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
      const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
      const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
      const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
      const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
      const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
      const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
      const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);
      int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
      int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
      int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
      int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
      int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
      int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
      int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
      int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
    #else
      const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
      const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
      const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
      const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
      const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
      const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
      int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
      const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
      const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
      const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
      const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
      const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
      const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
      int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
      const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
      const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
      const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
      const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
      const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
      const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
      int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
      const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
      const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
      const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
      const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
      const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
      const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
      int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
      const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
      const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
      const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
      const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
      const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
      const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
      int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
      const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
      const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
      const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
      const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
      const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
      const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
      int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
      const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
      const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
      const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
      const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
      const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
      const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
      int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
      const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
      const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
      const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
      const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
      const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
      const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
      int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
    #endif

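    // gemmlowp-style requantization: saturating rounding doubling high multiply
    // by the fixed-point multiplier, a sign-based fixup (suppressed when the
    // shift is zero) so the division rounds the same way as gemmlowp, then a
    // rounding arithmetic shift by `right_shift` (VRSHL).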
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
    vacc0x0123 = vqrdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqrdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqrdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqrdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqrdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqrdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqrdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqrdmulhq_s32(vacc3x4567, vmultiplier);

    const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
    const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
    vacc0x0123 = vsraq_n_s32(vacc0x0123, vbicq_s32(vacc0x0123, vzero_shift_mask), 31);
    vacc0x4567 = vsraq_n_s32(vacc0x4567, vbicq_s32(vacc0x4567, vzero_shift_mask), 31);
    vacc1x0123 = vsraq_n_s32(vacc1x0123, vbicq_s32(vacc1x0123, vzero_shift_mask), 31);
    vacc1x4567 = vsraq_n_s32(vacc1x4567, vbicq_s32(vacc1x4567, vzero_shift_mask), 31);
    vacc2x0123 = vsraq_n_s32(vacc2x0123, vbicq_s32(vacc2x0123, vzero_shift_mask), 31);
    vacc2x4567 = vsraq_n_s32(vacc2x4567, vbicq_s32(vacc2x4567, vzero_shift_mask), 31);
    vacc3x0123 = vsraq_n_s32(vacc3x0123, vbicq_s32(vacc3x0123, vzero_shift_mask), 31);
    vacc3x4567 = vsraq_n_s32(vacc3x4567, vbicq_s32(vacc3x4567, vzero_shift_mask), 31);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_shift);

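    // Narrow to the output type: saturate int32 -> int16, add the output zero
    // point, then saturate int16 -> int8, packing two rows per 128-bit vector.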
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
    #if XNN_ARCH_ARM64
      const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
      const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
      const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
      const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
      int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
      int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
    #else
      const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
      const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
      const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
      const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);

      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
    #endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

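    // Store the output tile. The two 16-byte vectors hold rows 0|1 and 2|3; a
    // full 8-column tile is stored directly, otherwise 4-, 2- and 1-byte pieces
    // are written, with VEXT rotating the next bytes into place between stores.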
    if (nc >= 8) {
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}