// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>


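// Single-pass ("unipass") global average pooling microkernel: sums up to 7
// rows of quantized uint8 inputs per channel, requantizes the sums through
// fp32 arithmetic, and writes uint8 outputs 16 channels at a time.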
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

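  // One input pointer per row. Pointers for rows beyond `rows` are redirected
  // to the caller-provided zero buffer, so those rows add nothing to the sums.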
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

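  // Precomputed requantization parameters: an integer bias added to the raw
  // sums, an fp32 scale, and a magic-bias pair used for float-to-int rounding.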
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
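  // Main loop: 16 channels per iteration, each row loaded as two 8-byte halves.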
  for (; channels >= 16; channels -= 16) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;

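    // Widen and accumulate into 16-bit sums: 7 rows of uint8 total at most
    // 7 * 255 = 1785, which fits comfortably in uint16.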
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);

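    // Widen the 16-bit sums to 32 bits, folding in the init bias.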
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));

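    // Convert to fp32 and apply the combined averaging/requantization scale.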
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

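    // Magic-bias rounding: adding the magic bias places each value's rounded
    // integer in the low mantissa bits of the float. Reinterpreting the bits
    // as int32 and subtracting (magic-bias bits minus the output zero point)
    // recovers the rounded, zero-point-adjusted result; the saturating
    // vqsubq_s32 clamps instead of wrapping on overflow.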
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
    vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));

    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);

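    // Narrow 32 -> 16 bits with signed saturation; AArch64 uses the fused
    // narrow-into-high-half form, AArch32 combines two 64-bit halves.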
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif  // !XNN_ARCH_ARM64

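    // Narrow 16 -> 8 bits with unsigned saturation.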
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else  // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif  // !XNN_ARCH_ARM64

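    // Clamp to the output activation range.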
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
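  // Remainder: handle the last 1-15 channels in groups of 8. XNN_OOB_READS
  // permits the 8-byte loads to read past the end of each row; the tail
  // store below never writes more than `channels` bytes.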
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

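      // Store a full group of 8 channels, or split the final 1-7 channels
      // into 4-, 2- and 1-byte stores.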
      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
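
// ---------------------------------------------------------------------------
// Hedged usage sketch (illustration only, not part of the generated file, and
// guarded out of the build): averaging `rows` (1..7) rows of `channels` uint8
// values whose input and output quantization are assumed to share the same
// scale and zero point `zp`. The parameter values below follow from the
// arithmetic above; real callers let XNNPACK's params-init helpers fill the
// union instead of writing fields by hand.
// ---------------------------------------------------------------------------
#if 0
#include <stdint.h>
#include <string.h>

static void example_gavgpool_qu8(
    const uint8_t* input, size_t rows, size_t channels, size_t input_stride,
    uint8_t zp, uint8_t* output, const uint8_t* zero /* zeroed row buffer */)
{
  union xnn_qu8_avgpool_minmax_params params;
  memset(&params, 0, sizeof(params));
  params.fp32_neon.init_bias = -((int32_t) zp) * (int32_t) rows;  // cancels the input zero point
  params.fp32_neon.scale = 1.0f / (float) rows;                   // averaging divisor
  params.fp32_neon.magic_bias = 12582912.0f;                      // 2**23 + 2**22
  params.fp32_neon.magic_bias_less_output_zero_point =
      INT32_C(0x4B400000) - (int32_t) zp;                         // bits of 12582912.0f, minus zp
  params.fp32_neon.output_min = 0;
  params.fp32_neon.output_max = 255;

  xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c16(
      rows, channels, input, input_stride, zero, output, &params);
}
#endif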