// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vopc-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>

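// vrsubc: reverse subtraction against a broadcast scalar, y[i] = *b - a[i],
// with each result clamped to [params->scalar.min, params->scalar.max].
// This x86-tuned WAsm SIMD variant handles 16 floats per main-loop iteration.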
void xnn_f32_vrsubc_minmax_ukernel__wasmsimd_x86_x16(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(a != NULL);
  assert(b != NULL);
  assert(y != NULL);

  const v128_t vy_min = wasm_v128_load32_splat(&params->scalar.min);
  const v128_t vy_max = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vb = wasm_v128_load32_splat(b);
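  // Main loop: process 16 floats per iteration as four 4-lane vectors.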
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const v128_t va0123 = wasm_v128_load(a);
    const v128_t va4567 = wasm_v128_load(a + 4);
    const v128_t va89AB = wasm_v128_load(a + 8);
    const v128_t vaCDEF = wasm_v128_load(a + 12);
    a += 16;

    v128_t vy0123 = wasm_f32x4_sub(vb, va0123);
    v128_t vy4567 = wasm_f32x4_sub(vb, va4567);
    v128_t vy89AB = wasm_f32x4_sub(vb, va89AB);
    v128_t vyCDEF = wasm_f32x4_sub(vb, vaCDEF);

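    // Clamp: replace lanes below vy_min with vy_min, then lanes above vy_max with
    // vy_max, using compare masks + wasm_v128_bitselect (this x86-tuned variant
    // prefers compares + bitselect over wasm_f32x4_min/wasm_f32x4_max).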
    const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);

    const v128_t vngtmask0123 = wasm_f32x4_le(vy0123, vy_max);
    vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    const v128_t vngtmask4567 = wasm_f32x4_le(vy4567, vy_max);
    vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    const v128_t vngtmask89AB = wasm_f32x4_le(vy89AB, vy_max);
    vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    const v128_t vngtmaskCDEF = wasm_f32x4_le(vyCDEF, vy_max);
    vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);

    vy0123 = wasm_v128_bitselect(vy0123, vy_max, vngtmask0123);
    vy4567 = wasm_v128_bitselect(vy4567, vy_max, vngtmask4567);
    vy89AB = wasm_v128_bitselect(vy89AB, vy_max, vngtmask89AB);
    vyCDEF = wasm_v128_bitselect(vyCDEF, vy_max, vngtmaskCDEF);

    wasm_v128_store(y, vy0123);
    wasm_v128_store(y + 4, vy4567);
    wasm_v128_store(y + 8, vy89AB);
    wasm_v128_store(y + 12, vyCDEF);
    y += 16;
  }
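  // Remainder loop: process 4 floats at a time.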
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(a);
    a += 4;

    v128_t vy = wasm_f32x4_sub(vb, va);

    const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    const v128_t vngtmask = wasm_f32x4_le(vy, vy_max);
    vy = wasm_v128_bitselect(vy_min, vy, vltmask);
    vy = wasm_v128_bitselect(vy, vy_max, vngtmask);

    wasm_v128_store(y, vy);
    y += 4;
  }
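  // Tail: 1 to 3 floats remain. A full 4-lane vector is loaded and clamped, but
  // only the lanes holding the remaining elements are stored below.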
  if XNN_UNLIKELY(n != 0) {
    const v128_t va = wasm_v128_load(a);

    v128_t vy = wasm_f32x4_sub(vb, va);

    const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    const v128_t vngtmask = wasm_f32x4_le(vy, vy_max);
    vy = wasm_v128_bitselect(vy_min, vy, vltmask);
    vy = wasm_v128_bitselect(vy, vy_max, vngtmask);

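    // Store the low two lanes as one 64-bit write when at least 2 elements remain,
    // shift the upper lanes down, then store a single lane for an odd element.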
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}