// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
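// Note: this is a code template. BATCH_TILE is the number of float elements
// processed per main-loop iteration; SIMD_TILE is the number of 8-float AVX
// registers needed to hold them.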
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


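// Mask table for the remainder path: loading 8 int32 values starting at
// element (7 - k) yields k leading all-ones (-1) lanes followed by zero lanes,
// i.e. a mask that keeps exactly the first k of 8 floats.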
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

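// Computes a scaled ELU over n bytes of floats:
//   y = beta * x                           for x >= 0,
//   y = alpha * (exp(prescale * x) - 1)    for x < 0.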
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);

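  // exp(z) is reconstructed as s * p(t), where n = round(z / ln(2) * 4) / 4,
  // t = z - n * ln(2), and s = 2**n:
  // - "rr2": two-step Cody-Waite range reduction, subtracting n * ln(2) as a
  //   high part and a low part to keep t accurate,
  // - "lut4": the fractional quarter of n selects one of four table entries
  //   2**(0/4)..2**(3/4); the integer part of n is shifted into the
  //   floating-point exponent field,
  // - "p4": p(t) is a degree-4 polynomial approximation of exp(t), with
  //   c0 = c1 = 1, on |t| <= ln(2) / 8.
  // Inputs at or below vsat_cutoff saturate: there exp(z) - 1 rounds to -1 in
  // single precision, so clamping avoids spurious overflow/underflow.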
  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
  const __m256 vtable = _mm256_set_ps(
    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
  const __m256 vone = _mm256_set1_ps(1.0f);

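  // When BATCH_TILE > 8, a wider main loop processes ${BATCH_TILE} elements
  // (${SIMD_TILE} AVX vectors) per iteration to expose instruction-level
  // parallelism; the 8-element loop below handles the rest.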
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m256 vx0 = _mm256_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
      x += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));

      $for N in range(SIMD_TILE):
        __m256 vn${N} = _mm256_add_ps(_mm256_mul_ps(vz${N}, vlog2e), vmagic_bias);

      $for N in range(SIMD_TILE):
        __m256 ven${N} = _mm256_andnot_ps(vindex_mask, vn${N});
        const __m256 vl${N} = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn${N}));
        const __m128 ven${N}_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven${N})), 21));

      $for N in range(SIMD_TILE):
        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
        const __m128 ven${N}_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven${N}, 1)), 21));

      $for N in range(SIMD_TILE):
        __m256 vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_hi), vz${N});
        ven${N} = _mm256_insertf128_ps(_mm256_castps128_ps256(ven${N}_lo), ven${N}_hi, 1);

      $for N in range(SIMD_TILE):
        vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_lo), vt${N});
        __m256 vs${N} = _mm256_mul_ps(vl${N}, ven${N});

      $for N in range(SIMD_TILE):
        __m256 vp${N} = _mm256_add_ps(_mm256_mul_ps(vc4, vt${N}), vc3);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vc2);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_mul_ps(vp${N}, vt${N});

      $for N in range(SIMD_TILE):
        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
        vs${N} = _mm256_sub_ps(vs${N}, vone);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vt${N});

      $for N in range(SIMD_TILE):
        const __m256 ve${N} = _mm256_mul_ps(_mm256_add_ps(vp${N}, vs${N}), valpha);
        vx${N} = _mm256_mul_ps(vx${N}, vbeta);

      $for N in range(SIMD_TILE):
        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});

      _mm256_storeu_ps(y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_ps(y + ${N * 8}, vy${N});
      y += ${BATCH_TILE};
    }
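  // Process full vectors of 8 elements; the steps are commented inline below
  // and mirror the wider loop above.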
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    x += 8;

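    // Scale the input and clamp from below at the saturation threshold, where
    // exp(vz) - 1 == -1 in single precision.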
    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));

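    // Round vz / ln(2) to the nearest multiple of 1/4 with the magic-bias
    // trick: the low 2 mantissa bits of vn index the 2**(k/4) table, and the
    // remaining bits, shifted left by 23 - 2 = 21, form the exponent of 2**n.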
    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));

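    // Two-step Cody-Waite reduction: t = vz - vn * ln(2), with ln(2) split
    // into high and low parts for accuracy. The exponent halves were shifted
    // with 128-bit integer ops above (AVX1 has no 256-bit integer shifts) and
    // are recombined here; s = 2**n is then completed as vl * ven.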
    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
    __m256 vs = _mm256_mul_ps(vl, ven);

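    // Evaluate the polynomial tail: p = t * (c2 + t * (c3 + t * c4)).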
    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_mul_ps(vp, vt);

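    // Reconstruct expm1(vz) = (s - 1) + s * (t + t * p):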
    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_sub_ps(vs, vone);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);

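    // The blend mask is vx itself: _mm256_blendv_ps selects by the sign bit,
    // so negative inputs (for positive beta) take alpha * expm1(vz) and
    // non-negative inputs take beta * vx.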
    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
    vx = _mm256_mul_ps(vx, vbeta);
    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
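  // Handle the remaining 1-7 elements with a masked load and partial stores.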
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
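    // n is a byte count here, so &mask_table[7] - n points k int32 entries
    // before mask_table[7] when k = n / sizeof(float) floats remain, yielding
    // a mask with exactly k leading -1 lanes.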
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
    __m256 vs = _mm256_mul_ps(vl, ven);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_mul_ps(vp, vt);

    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_sub_ps(vs, vone);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);

    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
    vx = _mm256_mul_ps(vx, vbeta);
    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);

    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
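    // Instead, store the remaining 1-7 floats in descending power-of-two
    // chunks, shifting the already-stored lanes out of vy_lo after each step.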
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}