// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

| 6 | #include <assert.h> |
| 7 | |
| 8 | #include <immintrin.h> |
| 9 | |
| 10 | #include <xnnpack/common.h> |
| 11 | #include <xnnpack/vscale.h> |
| 12 | |
| 13 | |
// Multiply each of the n / sizeof(float) elements of x by the scalar c and
// store the products to y. Loads and stores are unaligned-safe. n is a byte
// count and must be a non-zero multiple of sizeof(float).
void xnn_f32_vscale_ukernel__avx_unroll32(
    size_t n,
    const float* x,
    float* y,
    float c)
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // Broadcast the multiplier across all 8 lanes of a 256-bit vector.
  const __m256 vmultiplier = _mm256_set1_ps(c);

  // Main loop: 32 floats (four 8-lane vectors) per iteration.
  while (n >= 32 * sizeof(float)) {
    const __m256 vacc0 = _mm256_mul_ps(_mm256_loadu_ps(x), vmultiplier);
    const __m256 vacc1 = _mm256_mul_ps(_mm256_loadu_ps(x + 8), vmultiplier);
    const __m256 vacc2 = _mm256_mul_ps(_mm256_loadu_ps(x + 16), vmultiplier);
    const __m256 vacc3 = _mm256_mul_ps(_mm256_loadu_ps(x + 24), vmultiplier);
    x += 32;

    _mm256_storeu_ps(y, vacc0);
    _mm256_storeu_ps(y + 8, vacc1);
    _mm256_storeu_ps(y + 16, vacc2);
    _mm256_storeu_ps(y + 24, vacc3);
    y += 32;

    n -= 32 * sizeof(float);
  }
  // Tail loop: one 8-lane vector at a time.
  while (n >= 8 * sizeof(float)) {
    const __m256 vacc = _mm256_mul_ps(_mm256_loadu_ps(x), vmultiplier);
    x += 8;

    _mm256_storeu_ps(y, vacc);
    y += 8;

    n -= 8 * sizeof(float);
  }
  // Scalar remainder: fewer than 8 floats left. Reuse the low 128-bit half
  // of the broadcast multiplier for the single-lane multiplies.
  const __m128 vmultiplier_lo = _mm256_castps256_ps128(vmultiplier);
  while (n != 0) {
    const __m128 vacc = _mm_mul_ss(_mm_load_ss(x), vmultiplier_lo);
    x += 1;

    _mm_store_ss(y, vacc);
    y += 1;

    n -= sizeof(float);
  }
}