// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/MRx2-neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>


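// Computes a 4x2 (MR x NR) tile of C = A * B + bias with a fused min/max
// clamp. In the kernel name, "neonfma" = NEON fused multiply-add, "lane" =
// FMA against a lane of the A register, "ld64" = 64 bits (2 floats) of each
// A row loaded per main-loop iteration. The bias and B panel are pre-packed
// into w.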
void xnn_f32_gemm_ukernel_4x2__neonfma_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

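  // Set up one input (A) and one output (C) pointer per row of the tile.
  // When mr < 4, the unused rows alias the last valid row, so the redundant
  // computation simply rewrites the same values to the same memory.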
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
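    // Initialize all 4 row accumulators with the 2-element bias packed at
    // the start of w.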
    float32x2_t vacc0x01 = vld1_f32(w); w += 2;
    float32x2_t vacc1x01 = vacc0x01;
    float32x2_t vacc2x01 = vacc0x01;
    float32x2_t vacc3x01 = vacc0x01;

    size_t k = kc;
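    // Main loop: consume K two elements at a time (64 bits of each A row
    // per iteration).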
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;

      const float32x2_t vb01c0 = vld1_f32(w); w += 2;

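      // On AArch64 the lane form of the FMA intrinsic is available, so the
      // multiplier comes straight from a lane of va. On 32-bit ARM the lane
      // is broadcast with vdup_lane_f32 first, then fed to a plain vfma_f32.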
      #if XNN_ARCH_ARM64
        vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
        vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
        vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
        vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
      #else
        const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
        const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
        const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
        const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
        vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
        vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
        vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
        vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
      #endif
      const float32x2_t vb01c1 = vld1_f32(w); w += 2;

      #if XNN_ARCH_ARM64
        vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
        vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
        vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
        vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
      #else
        const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
        const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
        const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
        const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
        vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
        vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
        vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
        vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
      #endif
    }
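    // Remainder: kc is an odd number of floats, so one element of each A row
    // is left; broadcast it and do a final FMA.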
    if XNN_UNLIKELY(k != 0) {
      const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
      const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
      const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
      const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;

      const float32x2_t vb01 = vld1_f32(w); w += 2;

      vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
      vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
      vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
      vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
    }

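    // Fused activation: clamp the accumulators to [min, max] from params.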
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
    vacc0x01 = vmin_f32(vacc0x01, vmax);
    vacc1x01 = vmin_f32(vacc1x01, vmax);
    vacc2x01 = vmin_f32(vacc2x01, vmax);
    vacc3x01 = vmin_f32(vacc3x01, vmax);

    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
    vacc0x01 = vmax_f32(vacc0x01, vmin);
    vacc1x01 = vmax_f32(vacc1x01, vmin);
    vacc2x01 = vmax_f32(vacc2x01, vmin);
    vacc3x01 = vmax_f32(vacc3x01, vmin);

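    // Full-width store: write the 2-column tile for each row, advance the C
    // pointers to the next tile, and rewind the A pointers (advanced by kc
    // bytes above) so the same rows feed the next column tile.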
    if XNN_LIKELY(nc >= 2) {
      vst1_f32(c0, vacc0x01);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);
      vst1_f32(c1, vacc1x01);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1_f32(c2, vacc2x01);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1_f32(c3, vacc3x01);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);

      nc -= 2;
    } else {
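      // Tail: exactly 1 column remains; store only lane 0 of each
      // accumulator and finish. The A and C pointers are not updated,
      // since this is the last column tile.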
      assert(nc == 1);
      vst1_lane_f32(c0, vacc0x01, 0);
      vst1_lane_f32(c1, vacc1x01, 0);
      vst1_lane_f32(c2, vacc2x01, 0);
      vst1_lane_f32(c3, vacc3x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}