// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/igemm.h>


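// IGEMM microkernel: computes an ${MR}x${NR} block of the output from
// indirectly-addressed input rows, with min/max clamping. The "c4" suffix
// means each SSE accumulator carries 4 partial sums along the K dimension,
// reduced after the K loops complete.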
void xnn_f32_igemm_minmax_ukernel_${MR}x${NR}c4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  float* c0 = c;
  $for M in range(1, MR):
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
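    // Initialize accumulators from the packed weights: the first ${NR} floats
    // are the bias values. _mm_load_ss puts the bias in lane 0 and zeroes
    // lanes 1-3; rows 1..${MR-1} start from copies of row 0's accumulators.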
    __m128 vacc0x0c4 = _mm_load_ss(w);
    $for N in range(1, NR):
      __m128 vacc0x${N}c4 = _mm_load_ss(w + ${N});
    $for M in range(1, MR):
      $for N in range(NR):
        __m128 vacc${M}x${N}c4 = vacc0x${N}c4;
    w += ${NR};

    size_t p = ks;
    do {
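      // Load the next ${MR} input row pointers from the indirection buffer.
      // A pointer equal to `zero` refers to the shared zero buffer (padding)
      // and is used as-is; all other pointers are shifted by a_offset.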
      $for M in range(MR):
        const float* restrict a${M} = a[${M}];
        assert(a${M} != NULL);
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

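      // Main K loop: consume 4 input channels per iteration. The packed
      // weights store 4 consecutive K-values per output channel, so each
      // multiply-add accumulates 4 partial products per accumulator lane.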
      size_t k = kc;
      for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
        $for M in range(MR):
          const __m128 va${M} = _mm_loadu_ps(a${M});
          a${M} += 4;

        const __m128 vb0 = _mm_loadu_ps(w);
        $for N in range(1, NR):
          const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
        w += ${NR * 4};

        $for M in range(MR):
          $for N in range(NR):
            vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(va${M}, vb${N}));
      }
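      // Remainder K loop (kc not a multiple of 4): the packing routine
      // zero-pads the weights past kc, so input lanes whose weight is zero
      // are masked off with andnot. This keeps garbage in the input tail
      // from reaching the accumulators (e.g. Inf * 0 would produce NaN).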
      if XNN_UNLIKELY(k != 0) {
        $for M in range(MR):
          const __m128 va${M} = _mm_loadu_ps(a${M});

        const __m128 vb0 = _mm_loadu_ps(w);
        $for N in range(1, NR):
          const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
        w += ${NR * 4};

        $for N in range(NR):
          const __m128 vmask${N} = _mm_cmpeq_ps(_mm_setzero_ps(), vb${N});

        $for M in range(MR):
          $for N in range(NR):
            vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(_mm_andnot_ps(vmask${N}, va${M}), vb${N}));
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

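    // Reduce the c4 accumulators. The unpacklo/unpackhi add folds each
    // accumulator's 4 partial sums down to 2 per (row, column); the
    // movelh/movehl add finishes the reduction and packs two rows into one
    // register as [row M col 0, row M col 1, row M+1 col 0, row M+1 col 1].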
    $for M in range(MR):
      const __m128 vacc${M}x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc${M}x0c4, vacc${M}x1c4), _mm_unpackhi_ps(vacc${M}x0c4, vacc${M}x1c4));

    $for M in range(0, MR, 2):
      __m128 vacc${M}${M+1}x01 = _mm_add_ps(_mm_movelh_ps(vacc${M}x01c2, vacc${M+1}x01c2), _mm_movehl_ps(vacc${M+1}x01c2, vacc${M}x01c2));

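    // Apply the minmax activation: clamp against the upper bound with MIN
    // and against the lower bound with MAX, using the broadcast values in
    // params->sse.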
    const __m128 vmax = _mm_load_ps(params->sse.max);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_min_ps(vacc${M}${M+1}x01, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_max_ps(vacc${M}${M+1}x01, vmin);

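    // Store the ${NR}-wide output tile: the low 64 bits of each packed
    // register hold the even row, the high 64 bits the odd row. Rows are
    // stored in descending order so that when mr < ${MR} and row pointers
    // alias, the in-range row's data is written last. After a full store,
    // `a` is rewound by ks to reuse the same indirection entries for the
    // next ${NR} output channels.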
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(0, MR, 2)):
        _mm_storeh_pi((__m64*) c${M+1}, vacc${M}${M+1}x01);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        _mm_storel_pi((__m64*) c${M}, vacc${M}${M+1}x01);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= ${NR};
    } else {
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        _mm_store_ss(c${M+1}, _mm_movehl_ps(vacc${M}${M+1}x01, vacc${M}${M+1}x01));
        _mm_store_ss(c${M}, vacc${M}${M+1}x01);

      nc = 0;
    }
  } while (nc != 0);
}