// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/clamp.h>
#include <xnnpack/common.h>

// CLAMP (min/max activation) micro-kernel template for single-precision
// floats, implemented with SSE intrinsics: y[i] = min(max(x[i], min), max).
//
// This is an xngen template: `$`-prefixed lines and `${...}` substitutions are
// expanded by the code generator, producing one concrete kernel per value of
// BATCH_TILE (a multiple of 4, enforced by the $asserts above).
//
// n      - size of x and y in BYTES (not elements); must be a non-zero
//          multiple of sizeof(float) (checked by the asserts below).
// x      - input array.
// y      - output array.
// params - clamp bounds; _mm_load_ps requires params->sse.min/max to be
//          16-byte aligned, and all 4 lanes are consumed, so each is
//          presumably a 4-lane replica of the scalar bound -- set up by the
//          params initializer (not visible in this file).
void xnn_f32_clamp_ukernel__sse_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128 vy_min = _mm_load_ps(params->sse.min);
  const __m128 vy_max = _mm_load_ps(params->sse.max);

  // Main loop: BATCH_TILE floats per iteration, 4 per SSE register.
  // Loads, clamps, and stores are grouped by phase (all loads, all maxes,
  // all mins, all stores) across the unrolled registers.
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    __m128 vacc${ABC[0:4]} = _mm_loadu_ps(x);
    $for N in range(4, BATCH_TILE, 4):
      __m128 vacc${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = _mm_max_ps(vacc${ABC[N:N+4]}, vy_min);

    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = _mm_min_ps(vacc${ABC[N:N+4]}, vy_max);

    _mm_storeu_ps(y, vacc${ABC[0:4]});
    $for N in range(4, BATCH_TILE, 4):
      _mm_storeu_ps(y + ${N}, vacc${ABC[N:N+4]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 4:
    // Secondary loop (generated only when BATCH_TILE > 4): process remaining
    // full groups of 4 floats that did not fill a whole BATCH_TILE.
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      __m128 vacc = _mm_loadu_ps(x);
      x += 4;

      vacc = _mm_max_ps(vacc, vy_min);
      vacc = _mm_min_ps(vacc, vy_max);

      _mm_storeu_ps(y, vacc);
      y += 4;
    }
  // Tail: 1-3 leftover floats. A full 4-float vector is loaded and clamped,
  // but only the valid low elements are stored back.
  // NOTE(review): the _mm_loadu_ps here reads up to 3 floats past the last
  // valid element of x -- presumably the micro-kernel calling convention
  // guarantees that over-read is safe; confirm against the caller contract.
  if XNN_UNLIKELY(n != 0) {
    __m128 vacc = _mm_loadu_ps(x);
    vacc = _mm_max_ps(vacc, vy_min);
    vacc = _mm_min_ps(vacc, vy_max);

    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vacc);   // store low 2 floats
      vacc = _mm_movehl_ps(vacc, vacc);  // move high 2 floats into low lanes
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vacc);             // store 1 remaining float
    }
  }
}