// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>

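// Sparse-matrix times dense-matrix multiplication (SpMM) micro-kernel with an
// 8x1 tile, using NEON FMA and software-pipelined loads.
//
// Argument roles, as used below:
//   m           - number of output elements per channel (the dense dimension), processed 8 at a time
//   n           - number of output channels, processed one per pass of the channel loop
//   a           - dense input; advanced between non-zeros by byte offsets taken from widx_dmap
//   weights     - per output channel: one bias value followed by that channel's non-zero weights
//   widx_dmap   - byte offsets applied to the input pointer between consecutive non-zero weights
//   nidx_nnzmap - number of non-zero weights for each output channel
//   c           - output, written one channel at a time with a stride of m elements between channels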
void xnn_f32_spmm_minmax_ukernel_8x1__neonfma_pipelined(
    uint32_t m,
    uint32_t n,
    const float*restrict a,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict c,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(m != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t i = m;
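  // Main loop: process the dense dimension in tiles of 8 elements,
  // held in two 4-lane NEON registers (va0123, va4567).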
  while XNN_LIKELY(i >= 8) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
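    // Software pipelining: pre-load the first weight (the channel bias), the
    // first input offset, and the first 8 inputs before entering the channel loop.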
    float32x4_t vw = vld1q_dup_f32(w); w += 1;
    intptr_t diff = *dmap++;
    float32x4_t va0123 = vld1q_f32(a);
    float32x4_t va4567 = vld1q_f32(a + 4);
    size_t j = n;
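    // Loop over the n output channels; each iteration produces 8 outputs.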
    do {
      uint32_t nnz = *nnzmap++;
      float32x4_t vacc0123 = vw;
      float32x4_t vacc4567 = vw;
      vw = vld1q_dup_f32(w); w += 1;
      if XNN_LIKELY(nnz != 0) {
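        // Inner loop over this channel's non-zero weights: issue the FMAs for
        // the current weight, then immediately load the next weight, input
        // offset, and inputs so the loads overlap with the arithmetic.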
        do {
          vacc0123 = vfmaq_f32(vacc0123, va0123, vw);
          vacc4567 = vfmaq_f32(vacc4567, va4567, vw);
          a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);

          diff = *dmap++;
          vw = vld1q_dup_f32(w); w += 1;
          va0123 = vld1q_f32(a);
          va4567 = vld1q_f32(a + 4);
        } while (--nnz != 0);
      }
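      // Clamp the accumulators to [min, max] and store 8 outputs for this channel.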
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vout4567 = vmaxq_f32(vout4567, vmin);
      vst1q_f32(c, vout0123);
      vst1q_f32(c + 4, vout4567);
      c += m;
    } while (--j != 0);
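    // Rewind the output pointer over the n channels just written, then advance
    // the input and output pointers to the next tile of 8 elements.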
    c -= m * n;
    c += 8;
    a += 8;
    i -= 8;
  }
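  // Remainder: handle up to 7 leftover elements of the dense dimension using
  // 4-, 2-, and 1-element tiles (same structure, without the pipelined loads).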
  if XNN_UNLIKELY(i != 0) {
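    // Tile of 4 dense elements.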
    if (i & 4) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t j = n;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t va0123 = vld1q_f32(a);
            a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);
            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
            vacc0123 = vfmaq_f32(vacc0123, va0123, vb);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vst1q_f32(c, vout0123);
        c += m;
      } while (--j != 0);
      c -= m * n;
      c += 4;
      a += 4;
    }
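    // Tile of 2 dense elements.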
    if (i & 2) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t j = n;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t va01 = vld1_f32(a);
            a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            vacc01 = vfma_f32(vacc01, va01, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(c, vout01);
        c += m;
      } while (--j != 0);
      c -= m * n;
      c += 2;
      a += 2;
    }
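    // Final single dense element.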
    if (i & 1) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t j = n;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t va0 = vld1_dup_f32(a);
            a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            vacc0 = vfma_f32(vacc0, va0, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        vst1_lane_f32(c, vout0, 0);
        c += m;
      } while (--j != 0);
      c -= m * n;
      c += 1;
      a += 1;
    }
  }
}