// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


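// vrndu: round each f32 element up towards +infinity (ceil) using only SSE2,
// i.e. without the SSE4.1 ROUNDPS instruction, via a truncating float->int32
// conversion followed by a conditional +1.0f adjustment.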
void xnn_f32_vrndu_ukernel__sse2_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // 0x80000000 in every lane: the floating-point sign mask, which is also the
  // INT32_MIN pattern that _mm_cvttps_epi32 produces for out-of-range inputs.
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128 vone = _mm_load_ps(params->sse2.one);
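  // Main loop: round ${BATCH_TILE} elements per iteration.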
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
    $for N in range(4, BATCH_TILE, 4):
      const __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    // Truncate towards zero; out-of-range lanes (|x| >= 2**31) and NaN produce INT32_MIN (0x80000000).
    $for N in range(0, BATCH_TILE, 4):
      const __m128i vintx${ABC[N:N+4]} = _mm_cvttps_epi32(vx${ABC[N:N+4]});

    // Blend mask: always keep the sign bit of x, and keep x entirely in lanes where the conversion saturated.
    $for N in range(0, BATCH_TILE, 4):
      const __m128 vrndmask${ABC[N:N+4]} = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx${ABC[N:N+4]}, vmagic)));

    // Convert the truncated integers back to float.
    $for N in range(0, BATCH_TILE, 4):
      const __m128 vprerndx${ABC[N:N+4]} = _mm_cvtepi32_ps(vintx${ABC[N:N+4]});

    // trunc(x): bits of x where the mask is set, bits of the re-converted value elsewhere.
    $for N in range(0, BATCH_TILE, 4):
      const __m128 vrndx${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(vx${ABC[N:N+4]}, vrndmask${ABC[N:N+4]}), _mm_andnot_ps(vrndmask${ABC[N:N+4]}, vprerndx${ABC[N:N+4]}));

    // Lanes where trunc(x) >= x already hold ceil(x); ORing in the sign mask keeps the sign bit of trunc(x) in the blend below.
    $for N in range(0, BATCH_TILE, 4):
      const __m128 vadjmask${ABC[N:N+4]} = _mm_or_ps(_mm_cmpge_ps(vrndx${ABC[N:N+4]}, vx${ABC[N:N+4]}), _mm_castsi128_ps(vmagic));

    // trunc(x) + 1, used where truncation rounded down.
    $for N in range(0, BATCH_TILE, 4):
      const __m128 vadjrndx${ABC[N:N+4]} = _mm_add_ps(vrndx${ABC[N:N+4]}, vone);

    // Select trunc(x) or trunc(x) + 1 per lane.
    $for N in range(0, BATCH_TILE, 4):
      const __m128 vy${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(vrndx${ABC[N:N+4]}, vadjmask${ABC[N:N+4]}), _mm_andnot_ps(vadjmask${ABC[N:N+4]}, vadjrndx${ABC[N:N+4]}));

    _mm_storeu_ps(y, vy${ABC[0:4]});
    $for N in range(4, BATCH_TILE, 4):
      _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 4:
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      const __m128 vx = _mm_loadu_ps(x);
      x += 4;

      const __m128i vintx = _mm_cvttps_epi32(vx);
      const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
      const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
      const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
      const __m128 vadjmask = _mm_or_ps(_mm_cmpge_ps(vrndx, vx), _mm_castsi128_ps(vmagic));
      const __m128 vadjrndx = _mm_add_ps(vrndx, vone);
      const __m128 vy = _mm_or_ps(_mm_and_ps(vrndx, vadjmask), _mm_andnot_ps(vadjmask, vadjrndx));

      _mm_storeu_ps(y, vy);
      y += 4;
    }
  // Handle the final 1 to 3 elements, if any.
  if XNN_UNLIKELY(n != 0) {
    const __m128 vx = _mm_loadu_ps(x);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    const __m128 vadjmask = _mm_or_ps(_mm_cmpge_ps(vrndx, vx), _mm_castsi128_ps(vmagic));
    const __m128 vadjrndx = _mm_add_ps(vrndx, vone);
    __m128 vy = _mm_or_ps(_mm_and_ps(vrndx, vadjmask), _mm_andnot_ps(vadjmask, vadjrndx));
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy);
    }
  }
}