// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

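// Depthwise 5x5 convolution with stride 2 and padding 2 over f32 CHW data;
// computes 1 output row and 4 output pixels per iteration using 2 accumulator
// chains (the 1x4_acc2 suffix).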
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);

  const uint32x4_t vmask_even = vld1q_u32(params->neon.mask_even);
  const uint32x4_t vmask_odd = vld1q_u32(params->neon.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon.min);

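  // weights[0] holds the bias; weights[1..25] hold the 25 kernel taps.
  // The hexadecimal-style digits 0-9, A-P in the register names index the
  // 26 loaded floats.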
  const float32x4_t vw0123 = vld1q_f32(weights);
  const float32x4_t vw4567 = vld1q_f32(weights + 4);
  const float32x4_t vw89AB = vld1q_f32(weights + 8);
  const float32x4_t vwCDEF = vld1q_f32(weights + 12);
  const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
  const float32x4_t vwKLMN = vld1q_f32(weights + 20);
  const float32x2_t vwOP = vld1_f32(weights + 24);

  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));

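  // i0..i4 are the five input rows feeding the first output row. Rows in the
  // top padding are redirected to the zero buffer; the (-padding_top_less_1)
  // & input_width trick biases i1 so that i2 lands on input row 0 for either
  // supported padding_top value.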
  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);


  float* o0 = output;

  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
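  // Equivalent to (padded height - kernel size) / stride + 1, folded into a
  // single division.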
  do {
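    // Rows that would fall into the bottom padding read from the zero buffer.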
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
    }

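    // vi*x0246 / vi*x1357 carry the even / odd columns of the previous block;
    // starting them at zero supplies the left padding of 2.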
    float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi4x0246 = vmovq_n_f32(0.0f);

    float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi4x1357 = vmovq_n_f32(0.0f);

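    // vld2q_f32 deinterleaves 8 consecutive pixels into even-column (8,A,C,E)
    // and odd-column (9,B,D,F) vectors.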
    float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
    float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
    float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
    float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
    float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;

    size_t w = input_width;
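    // Main loop: each iteration consumes 8 input columns and produces 4
    // output pixels.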
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
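      // vo0p0 seeds with the bias, vo0p1 with the first product below; the
      // two accumulator chains run independently to hide MAC latency.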
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[1], vwOP, 0);

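      // Columns 6,8,A,C: shift the last even column of the previous block
      // into the current even vector.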
      const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
      vi0x0246 = vi0x8ACE9BDF.val[0];
      const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
      vi1x0246 = vi1x8ACE9BDF.val[0];
      const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
      vi2x0246 = vi2x8ACE9BDF.val[0];
      const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
      vi3x0246 = vi3x8ACE9BDF.val[0];
      const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
      vi4x0246 = vi4x8ACE9BDF.val[0];

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3x68AC, vget_low_f32(vwGHIJ), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);

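      // Columns 7,9,B,D: the same shift for the odd-column vectors.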
      const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
      vi0x1357 = vi0x8ACE9BDF.val[1];
      const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
      vi1x1357 = vi1x8ACE9BDF.val[1];
      const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
      vi2x1357 = vi2x8ACE9BDF.val[1];
      const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
      vi3x1357 = vi3x8ACE9BDF.val[1];
      const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
      vi4x1357 = vi4x8ACE9BDF.val[1];

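      // Load the next block of 8 columns (G..N) early, ahead of the ACEG
      // window that needs its first even column.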
      const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
      const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
      const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
      const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
      const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);

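      // Columns A,C,E,G: shift the current even vector left by one element,
      // pulling in the first even column of the next block.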
      const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
      vi0x8ACE9BDF = vi0xGIKMHJLN;
      const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
      vi1x8ACE9BDF = vi1xGIKMHJLN;
      const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
      vi2x8ACE9BDF = vi2xGIKMHJLN;
      const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
      vi3x8ACE9BDF = vi3xGIKMHJLN;
      const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
      vi4x8ACE9BDF = vi4xGIKMHJLN;

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1xACEG, vget_high_f32(vw89AB), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);

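      // Merge the two accumulator chains, then clamp to [min, max].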
      vo0p0 = vaddq_f32(vo0p0, vo0p1);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      vst1q_f32(o0, vo0); o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
    {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

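      // Mask off columns beyond the end of the row so they contribute zero.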
      const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
      const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
      const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
      const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
      const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));

      const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
      const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
      const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
      const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
      const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw89AB), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE, vget_low_f32(vwCDEF), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE, vget_high_f32(vwGHIJ), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x9BDF, vget_low_f32(vw4567), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x9BDF, vget_high_f32(vwCDEF), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi4x9BDF, vwOP, 0);

      const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
      const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
      const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
      const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
      const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3x68AC, vget_low_f32(vwGHIJ), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);

      const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
      const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
      const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
      const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
      const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);

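      // No columns remain past the right edge; shift in zeros instead of a
      // next block.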
      const float32x4_t vzero = vmovq_n_f32(0.0f);
      const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
      const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
      const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
      const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
      const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1xACEG, vget_high_f32(vw89AB), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

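      // w is in bytes; (w + 4) / 8 = ceil(remaining pixels / 2) is the number
      // of stride-2 output pixels left to store.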
      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        vst1q_f32(o0, vo0); o0 += 4;
      } else {
        float32x2_t vo0_lo = vget_low_f32(vo0);
        if (w_tmp & 2) {
          vst1_f32(o0, vo0_lo); o0 += 2;

          vo0_lo = vget_high_f32(vo0);
        }
        if (w_tmp & 1) {
          vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
        }
      }
    }

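    // Advance two input rows (stride 2): the old i2 becomes the new i0,
    // rewound by input_decrement to undo the within-row advance above.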
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}