// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

| 6 | $assert BATCH_TILE % 4 == 0 |
| 7 | $assert BATCH_TILE >= 4 |
| 8 | $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" |
| 9 | #include <assert.h> |
| 10 | |
| 11 | #include <xmmintrin.h> |
| 12 | |
| 13 | #include <xnnpack/clamp.h> |
| 14 | #include <xnnpack/common.h> |
| 15 | |
| 16 | |
| 17 | void xnn_f32_clamp_ukernel__sse_x${BATCH_TILE}( |
| 18 | size_t n, |
| 19 | const float* x, |
| 20 | float* y, |
Marat Dukhan | b2217dd | 2020-05-28 17:30:28 -0700 | [diff] [blame^] | 21 | const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN |
Marat Dukhan | 5c5fa96 | 2020-03-10 18:38:33 -0700 | [diff] [blame] | 22 | { |
| 23 | assert(n != 0); |
| 24 | assert(n % sizeof(float) == 0); |
| 25 | |
| 26 | const __m128 vy_min = _mm_load_ps(params->sse.min); |
| 27 | const __m128 vy_max = _mm_load_ps(params->sse.max); |
| 28 | |
| 29 | for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) { |
| 30 | __m128 vacc${ABC[0:4]} = _mm_loadu_ps(x); |
| 31 | $for N in range(4, BATCH_TILE, 4): |
| 32 | __m128 vacc${ABC[N:N+4]} = _mm_loadu_ps(x + ${N}); |
| 33 | x += ${BATCH_TILE}; |
| 34 | |
| 35 | $for N in range(0, BATCH_TILE, 4): |
| 36 | vacc${ABC[N:N+4]} = _mm_max_ps(vacc${ABC[N:N+4]}, vy_min); |
| 37 | |
| 38 | $for N in range(0, BATCH_TILE, 4): |
| 39 | vacc${ABC[N:N+4]} = _mm_min_ps(vacc${ABC[N:N+4]}, vy_max); |
| 40 | |
| 41 | _mm_storeu_ps(y, vacc${ABC[0:4]}); |
| 42 | $for N in range(4, BATCH_TILE, 4): |
| 43 | _mm_storeu_ps(y + ${N}, vacc${ABC[N:N+4]}); |
| 44 | y += ${BATCH_TILE}; |
| 45 | } |
| 46 | $if BATCH_TILE >= 4: |
| 47 | for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) { |
| 48 | __m128 vacc = _mm_loadu_ps(x); |
| 49 | x += 4; |
| 50 | |
| 51 | vacc = _mm_max_ps(vacc, vy_min); |
| 52 | vacc = _mm_min_ps(vacc, vy_max); |
| 53 | |
| 54 | _mm_storeu_ps(y, vacc); |
| 55 | y += 4; |
| 56 | } |
| 57 | if XNN_UNLIKELY(n != 0) { |
| 58 | __m128 vacc = _mm_loadu_ps(x); |
| 59 | vacc = _mm_max_ps(vacc, vy_min); |
| 60 | vacc = _mm_min_ps(vacc, vy_max); |
| 61 | |
| 62 | if (n & (2 * sizeof(float))) { |
| 63 | _mm_storel_pi((__m64*) y, vacc); |
| 64 | vacc = _mm_movehl_ps(vacc, vacc); |
| 65 | y += 2; |
| 66 | } |
| 67 | if (n & (1 * sizeof(float))) { |
| 68 | _mm_store_ss(y, vacc); |
| 69 | } |
| 70 | } |
| 71 | } |