// Auto-generated file. Do not edit!
//   Template: src/f32-velu/sse-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>

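// ELU micro-kernel: y[i] := beta * x[i] for x[i] >= 0, and
// y[i] := alpha * (exp(prescale * x[i]) - 1) for x[i] < 0,
// processing 16 floats per main-loop iteration with SSE4.1.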
void xnn_f32_velu_ukernel__sse41_rr2_p6_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
  const __m128 valpha = _mm_load_ps(params->sse.alpha);
  const __m128 vbeta = _mm_load_ps(params->sse.beta);

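  // Constants for approximating exp(z) - 1 on the negative branch: z is
  // clamped at vsat_cutoff, decomposed as z = n * ln(2) + t via a two-step
  // (hi/lo) Cody-Waite reduction, and exp(t) is approximated on
  // [-ln(2)/2, ln(2)/2] by a degree-6 polynomial with coefficients vc2..vc6.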
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
  const __m128 vone = _mm_set1_ps(1.0f);

  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    __m128 vx0123 = _mm_loadu_ps(x);
    __m128 vx4567 = _mm_loadu_ps(x + 4);
    __m128 vx89AB = _mm_loadu_ps(x + 8);
    __m128 vxCDEF = _mm_loadu_ps(x + 12);
    x += 16;

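    // z := prescale * x, clamped at the saturation cutoff: below the cutoff,
    // exp(z) - 1 == -1 to single precision, so clamping does not change the
    // result but keeps the 2^n computation below in range.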
    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));

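    // n := round(z / ln(2)): adding the large magic bias rounds z * log2(e)
    // to the nearest integer, which lands in the low bits of the float
    // representation of vn.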
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);

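    // s := 2^n, built by shifting the integer bits of vn into the exponent
    // field; the magic bias already encodes the exponent bias 127, so no
    // further re-biasing is needed.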
    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));

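    // Subtract the magic bias to recover n as a float.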
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);

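    // t := z - n * ln(2), with ln(2) split into high and low parts for extra
    // precision in the reduction.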
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);

    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);

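    // Horner evaluation of p := c2*t + c3*t^2 + c4*t^3 + c5*t^4 + c6*t^5,
    // so that exp(t) ~= 1 + t + t*p.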
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);

    vp0123 = _mm_mul_ps(vp0123, vt0123);
    vp4567 = _mm_mul_ps(vp4567, vt4567);
    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);

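    // Reconstruct the negative branch, exp(z) - 1 = (s - 1) + (s * t) * (1 + p):
    // first form t := s * t and s := s - 1.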
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vs0123 = _mm_sub_ps(vs0123, vone);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vs4567 = _mm_sub_ps(vs4567, vone);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vs89AB = _mm_sub_ps(vs89AB, vone);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
    vsCDEF = _mm_sub_ps(vsCDEF, vone);

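    // p := (s * t) * (1 + p), i.e. the s * (t + t * p) term above.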
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);

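    // e := alpha * (exp(z) - 1), the ELU output for negative inputs.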
    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);

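    // The output for non-negative inputs is simply beta * x.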
    vx0123 = _mm_mul_ps(vx0123, vbeta);
    vx4567 = _mm_mul_ps(vx4567, vbeta);
    vx89AB = _mm_mul_ps(vx89AB, vbeta);
    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);

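    // Blend the two branches: _mm_blendv_ps picks e in lanes where the sign
    // bit of vx (now beta * x) is set, and vx elsewhere.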
    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);

    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    y += 16;
  }
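  // Same computation, 4 elements at a time.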
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    __m128 vx = _mm_loadu_ps(x);
    x += 4;

    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    vt = _mm_mul_ps(vt, vs);
    vs = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);

    vx = _mm_mul_ps(vx, vbeta);
    const __m128 vy = _mm_blendv_ps(vx, ve, vx);

    _mm_storeu_ps(y, vy);
    y += 4;
  }
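  // Final 1-3 elements: the unaligned load reads a full 4-float vector (the
  // extra lanes are computed but never stored), and only the valid lanes are
  // written out below.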
  if XNN_UNLIKELY(n != 0) {
    __m128 vx = _mm_loadu_ps(x);

    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    vt = _mm_mul_ps(vt, vs);
    vs = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);

    vx = _mm_mul_ps(vx, vbeta);
    __m128 vy = _mm_blendv_ps(vx, ve, vx);

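    // Store 2 lanes and/or 1 lane, depending on the remaining element count.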
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy);
    }
  }
}