// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>

// Elementwise f32 vector multiply with min/max clamping, NEON, unrolled by 8.
// Computes y[i] = clamp(a[i] * b[i], params->scalar.min, params->scalar.max).
//
//   n      - size of the input/output arrays in BYTES (must be nonzero and a
//            multiple of sizeof(float), per the asserts below)
//   a, b   - input operand arrays (must be non-NULL)
//   y      - output array (must be non-NULL; may alias a or b — TODO confirm
//            aliasing contract with callers, the code neither requires nor
//            forbids it)
//   params - holds the scalar min/max clamping bounds
void xnn_f32_vmul_minmax_ukernel__neon_x8(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(a != NULL);
  assert(b != NULL);
  assert(y != NULL);

  // Broadcast the scalar clamping bounds into all 4 lanes of a vector.
  const float32x4_t vy_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vy_max = vld1q_dup_f32(&params->scalar.max);

  // Main loop: 8 floats per iteration, as two 4-lane vectors. Loads for both
  // vectors are interleaved ahead of the multiplies (generator's scheduling —
  // keep statement order as emitted).
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const float32x4_t va0123 = vld1q_f32(a); a += 4;
    const float32x4_t vb0123 = vld1q_f32(b); b += 4;
    const float32x4_t va4567 = vld1q_f32(a); a += 4;
    const float32x4_t vb4567 = vld1q_f32(b); b += 4;

    float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
    float32x4_t vy4567 = vmulq_f32(va4567, vb4567);


    // Clamp: max() applies the lower bound, min() applies the upper bound.
    vy0123 = vmaxq_f32(vy0123, vy_min);
    vy4567 = vmaxq_f32(vy4567, vy_min);

    vy0123 = vminq_f32(vy0123, vy_max);
    vy4567 = vminq_f32(vy4567, vy_max);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
  }
  // Secondary loop: one 4-float vector at a time (handles 4 <= n/4 < 8 leftovers).
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t va0123 = vld1q_f32(a);  a += 4;
    const float32x4_t vb0123 = vld1q_f32(b);  b += 4;

    float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
    vy0123 = vmaxq_f32(vy0123, vy_min);
    vy0123 = vminq_f32(vy0123, vy_max);
    vst1q_f32(y, vy0123); y += 4;
  }
  // Tail: 1-3 remaining floats. NOTE(review): a full 16-byte vector is loaded
  // from a and b even though fewer than 4 valid elements remain — presumably
  // callers guarantee the over-read is safe; verify buffer padding contract.
  // Only the valid lanes are stored below.
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t va0123 = vld1q_f32(a);
    const float32x4_t vb0123 = vld1q_f32(b);

    float32x4_t vy0123 = vmulq_f32(va0123, vb0123);
    vy0123 = vmaxq_f32(vy0123, vy_min);
    vy0123 = vminq_f32(vy0123, vy_max);

    // Store the remainder lane-by-lane: first an optional pair (bit 2 of the
    // byte count), then an optional single float (bit 1... i.e. 1*sizeof(float)).
    float32x2_t vy01 = vget_low_f32(vy0123);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy01); y += 2;
      vy01 = vget_high_f32(vy0123);  // advance to lanes 2-3 for the odd element
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy01, 0);
    }
  }
}