// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/sse2-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>


void xnn_f32_raddstoreexpminusmax_ukernel__sse2_p5_x20_acc5(
    size_t elements,
    const float* input,
    float* output,
    float* sum,
    float max) XNN_DISABLE_TSAN
{
  assert(elements % sizeof(float) == 0);

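  // The magic bias is 1.5*2**23 + 127: adding it to x * log2(e) rounds the product to the nearest integer n and
  // leaves n plus the IEEE-754 exponent bias (127) in the low mantissa bits, which the left shift by 23 in the
  // loops below turns directly into the scale 2**n.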
  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const __m128 vdenorm_cutoff = _mm_set1_ps(-0x1.5D589Ep6f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  // Last 7 bits are zeroes
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);

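  // Coefficients of the degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].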
  const __m128 vc1 = _mm_set1_ps(0x1.FFFFF6p-1f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFDC6p-2f);
  const __m128 vc3 = _mm_set1_ps(0x1.555A80p-3f);
  const __m128 vc4 = _mm_set1_ps(0x1.573A1Ap-5f);
  const __m128 vc5 = _mm_set1_ps(0x1.0F9F9Cp-7f);

  const __m128 vi_max = _mm_set1_ps(max);

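  // Spread partial sums across five independent accumulators to break the loop-carried addition dependency chain
  // and help reduce rounding error in the long sum.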
  __m128 vacc0 = _mm_setzero_ps();
  __m128 vacc1 = _mm_setzero_ps();
  __m128 vacc2 = _mm_setzero_ps();
  __m128 vacc3 = _mm_setzero_ps();
  __m128 vacc4 = _mm_setzero_ps();
  for (; elements >= 20 * sizeof(float); elements -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    const __m128 vi89AB = _mm_loadu_ps(input + 8);
    const __m128 viCDEF = _mm_loadu_ps(input + 12);
    const __m128 viGHIJ = _mm_loadu_ps(input + 16);
    input += 20;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
    const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);
    const __m128 vxGHIJ = _mm_sub_ps(viGHIJ, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);
    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vxGHIJ, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
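    // (The shift works because the magic bias already added the exponent bias of 127 to n, so the low bits of vn,
    // reinterpreted as an integer and shifted into the exponent field, form the IEEE-754 encoding of 2**n.)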
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
    const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
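    // The high part of log(2) has enough trailing zero mantissa bits that n * ln2_hi is exact; the low part then
    // restores the remaining bits of log(2), keeping t accurate across the whole input range.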
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);
    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vxGHIJ);

    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);

    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    __m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
    __m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
    __m128 vfGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
    vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);
    vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vxGHIJ, vdenorm_cutoff), vfGHIJ);

    // Store 20 (5x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    _mm_storeu_ps(output + 8, vf89AB);
    _mm_storeu_ps(output + 12, vfCDEF);
    _mm_storeu_ps(output + 16, vfGHIJ);
    output += 20;

    // Accumulate computed exponents.
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc1 = _mm_add_ps(vacc1, vf4567);
    vacc2 = _mm_add_ps(vacc2, vf89AB);
    vacc3 = _mm_add_ps(vacc3, vfCDEF);
    vacc4 = _mm_add_ps(vacc4, vfGHIJ);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm_add_ps(vacc0, vacc1);
  vacc2 = _mm_add_ps(vacc2, vacc3);
  vacc0 = _mm_add_ps(vacc0, vacc2);
  vacc0 = _mm_add_ps(vacc0, vacc4);

  __m128 vacc = vacc0;
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;

    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    if (elements & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;

      // Accumulate 2 computed exponents.
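      // (_mm_movelh_ps keeps only the two low lanes of vf and zeroes the upper two, so lanes that were not stored
      // do not contribute to the sum.)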
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));

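      // Shift the two remaining upper lanes of vf down so that a possible final element ends up in the low lane.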
      vf = _mm_movehl_ps(vf, vf);
    }
    if (elements & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);

      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Reduce the 4 elements in the SIMD register to a single sum: add the upper half onto the lower half, then add lane 1 onto lane 0.
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
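
// A minimal usage sketch (not part of the generated kernel): this ukernel computes f[i] = exp(input[i] - max) for
// `elements` bytes of floats, stores the results to `output`, and writes their sum to `*sum`. A softmax driver might
// look roughly like the hypothetical helper below; `compute_max` stands in for a separate max-reduction step and is
// not part of this file.
//
//   #include <stddef.h>
//
//   static float compute_max(const float* x, size_t n) {
//     float m = x[0];
//     for (size_t i = 1; i < n; i++) {
//       if (x[i] > m) m = x[i];
//     }
//     return m;
//   }
//
//   static void softmax(const float* x, float* y, size_t n) {
//     const float x_max = compute_max(x, n);  // n is assumed to be nonzero
//     float sum_exp;
//     xnn_f32_raddstoreexpminusmax_ukernel__sse2_p5_x20_acc5(n * sizeof(float), x, y, &sum_exp, x_max);
//     const float inv_sum = 1.0f / sum_exp;
//     for (size_t i = 0; i < n; i++) {
//       y[i] *= inv_sum;
//     }
//   }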