// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>

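// IGEMM micro-kernel computing a 5-row x 16-column tile of the output matrix:
// `a` is an indirection buffer of pointers to input rows, `w` holds the packed
// weights (16 bias values followed by the B matrix in 16-wide column blocks),
// and results are clamped to the [min, max] range supplied in `params`.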
void xnn_f32_igemm_ukernel_5x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (5 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

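  // Set up the five output row pointers, cm_stride apart. When mr < 5, clamp
  // the unused pointers to the previous row so their stores alias a valid row.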
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }

  do {
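    // Initialize all five accumulator rows with the 16 bias values packed at
    // the start of the weights.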
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

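    // Outer loop over the indirection buffer: consume 5 A-row pointers per
    // iteration, ks bytes in total. Every pointer except the `zero` sentinel
    // is shifted by a_offset.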
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      a += 5;

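      // Multiply-accumulate along the K dimension: broadcast one element from
      // each A row and fuse-multiply it against the next 16 packed B values.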
      size_t k = kc;
      do {
        const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
        w += 16;

        vacc0x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a0), vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
        vacc1x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a1), vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
        vacc2x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a2), vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
        vacc3x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a3), vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
        vacc4x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a4), vb0123456789ABCDEF, vacc4x0123456789ABCDEF);

        a0 += 1;
        a1 += 1;
        a2 += 1;
        a3 += 1;
        a4 += 1;

        k -= sizeof(float);
      } while (k != 0);
      p -= 5 * sizeof(void*);
    } while (p != 0);

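    // Clamp the accumulators to the output range [min, max] from params.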
    const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);

    const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);

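    // Store the 16-column tile. Rows are written from c4 down to c0 so that,
    // when mr < 5, aliased row pointers end up holding the valid row's data.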
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}