// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/MRx2-neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>


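// 4x2 GEMM microkernel: computes a block of up to 4 rows by 2 columns of
// C = A * B with NEON lane-wise multiply-accumulate. The packed weights w
// hold 2 bias values followed by interleaved 2-wide rows of B.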
void xnn_f32_gemm_ukernel_4x2__neon_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

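  // Set up per-row pointers into A and C. When mr < 4, the pointers for the
  // missing rows alias the previous row, so those rows recompute the same
  // values instead of touching out-of-bounds memory.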
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

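  // Process the output 2 columns at a time; the accumulators for all 4 rows
  // start from the same 2 packed bias values at the head of w.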
  do {
    float32x2_t vacc0x01 = vld1_f32(w); w += 2;
    float32x2_t vacc1x01 = vacc0x01;
    float32x2_t vacc2x01 = vacc0x01;
    float32x2_t vacc3x01 = vacc0x01;

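    // Main loop ("ld64"): each iteration loads a 64-bit pair of A values per
    // row and accumulates against two consecutive 2-wide rows of B.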
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;

      const float32x2_t vb01c0 = vld1_f32(w); w += 2;

      vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0);
      vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0);
      vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0);
      vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0);
      const float32x2_t vb01c1 = vld1_f32(w); w += 2;

      vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1);
      vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1);
      vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1);
      vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1);
    }
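    // Remainder: kc is an odd number of floats, so one K element is left;
    // broadcast it and accumulate against the final 2-wide row of B.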
    if XNN_UNLIKELY(k != 0) {
      const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
      const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
      const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
      const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;

      const float32x2_t vb01 = vld1_f32(w); w += 2;

      vacc0x01 = vmla_f32(vacc0x01, va0, vb01);
      vacc1x01 = vmla_f32(vacc1x01, va1, vb01);
      vacc2x01 = vmla_f32(vacc2x01, va2, vb01);
      vacc3x01 = vmla_f32(vacc3x01, va3, vb01);
    }

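    // Clamp the accumulators to the [min, max] range from params.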
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
    vacc0x01 = vmin_f32(vacc0x01, vmax);
    vacc1x01 = vmin_f32(vacc1x01, vmax);
    vacc2x01 = vmin_f32(vacc2x01, vmax);
    vacc3x01 = vmin_f32(vacc3x01, vmax);

    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
    vacc0x01 = vmax_f32(vacc0x01, vmin);
    vacc1x01 = vmax_f32(vacc1x01, vmin);
    vacc2x01 = vmax_f32(vacc2x01, vmin);
    vacc3x01 = vmax_f32(vacc3x01, vmin);

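    // Store a full 2-column block and rewind the A pointers for the next
    // block of columns, or store a single lane when only one column remains.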
    if XNN_LIKELY(nc >= 2) {
      vst1_f32(c0, vacc0x01);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      vst1_f32(c1, vacc1x01);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1_f32(c2, vacc2x01);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1_f32(c3, vacc3x01);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);

      nc -= 2;
    } else {
      assert(nc == 1);
      vst1_lane_f32(c0, vacc0x01, 0);
      vst1_lane_f32(c1, vacc1x01, 0);
      vst1_lane_f32(c2, vacc2x01, 0);
      vst1_lane_f32(c3, vacc3x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}