// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_f32_gemm_ukernel_6x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

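  // Set up per-row pointers into A and C. When mr < 6, the pointers for the
  // missing rows alias the preceding valid row, so those rows read the same
  // inputs and store the same results as the row they alias.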
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

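  // Each pass over this loop computes one 6x16 tile of C. The packed weights
  // w are laid out bias-first: 16 bias values, then kc groups of 16 weights.
  // All 6 row accumulators start from the same bias vector.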
  do {
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

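    // Main loop over K: each iteration broadcasts one scalar from every row
    // of A and multiply-accumulates it against a 16-wide vector of B.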
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a0), vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a1), vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a2), vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a3), vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a4), vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a5), vb0123456789ABCDEF, vacc5x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;

      k -= sizeof(float);
    } while (k != 0);

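    // Clamp the accumulators to [min, max]: each bound is loaded as 4 floats
    // from params->sse and the 128-bit lane is broadcast across 512 bits
    // (the sse params variant stores each bound replicated across all 4 lanes).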
    const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);

    const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);

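    // Full 16-column tile: store all rows, advance the C pointers to the next
    // tile, and rewind the A pointers by kc bytes for the next pass.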
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
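      // Final partial tile (1 <= nc <= 15): build a lane mask with the low nc
      // bits set (e.g. nc == 3 gives vmask == 0x0007) and store only those lanes.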
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}