// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

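  // Loop over blocks of 16 output channels (NR = 16). The packed weights begin
  // with 16 int32 bias values; each of the eight loads below seeds one int32x4
  // accumulator covering two output channels (two partial-sum lanes per channel).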
  do {
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));

    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;

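      // Main K loop: consume 16 bytes of activations per iteration. Each
      // vmull_s8/vmlal_s8 pair covers two consecutive 8-byte K blocks, and
      // vext_s8 rotates the activations by 4 bytes so the second ("c1") set of
      // weight vectors sees the shifted lanes (the "s2" shuffle in the name).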
      while (k >= 16 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
        const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
        const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
        const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
        const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
        va0x0 = vext_s8(va0x0, va0x0, 4);
        va0x1 = vext_s8(va0x1, va0x1, 4);
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
        const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
        const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
        const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
        const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);

        k -= 16 * sizeof(int8_t);
      }

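      // Remainder of 8..15 bytes of K: one 8-byte activation block, a single
      // vmull_s8 per product.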
      if (k >= 8 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
        va0x0 = vext_s8(va0x0, va0x0, 4);
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);

        k -= 8 * sizeof(int8_t);
      }

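      // Final 1..7 bytes of K: broadcast each 4-byte group of the activations
      // with vdup_lane_s32 and multiply against the "c0" (and, if more than
      // 4 bytes remain, "c1") weight vectors.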
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);

        if (k > 4 * sizeof(int8_t)) {
          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
        }
      }

      p -= 1 * sizeof(void*);
    } while (p != 0);

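    // Pairwise-add the paired accumulators into four int32x4 vectors holding
    // all 16 output channels (vpaddq_s32 on AArch64, vpadd_s32 on halves otherwise).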
    #if XNN_ARCH_ARM64
      int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
      int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
      int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
      int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
    #else
      const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
      const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
      int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
      const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
      const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
      int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
      const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
      const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
      int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
      const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
      const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
      int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
    #endif

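    // Requantize with the rndnu scheme: signed pre-shift, saturating doubling
    // multiply-high by the fixed-point multiplier, then a rounding post-shift.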
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

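    // Add the output zero point, narrow to int8 with saturation, and clamp to
    // [output_min, output_max].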
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
    #if XNN_ARCH_ARM64
      const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
      const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);

      int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    #else
      const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
      const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);

      int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    #endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

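    // Store 16 outputs per full column block; for the final partial block,
    // write 8/4/2/1 values at a time.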
    if (nc >= 16) {
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}