// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert MR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/spmm.h>

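// Sparse-times-dense matrix multiplication (SpMM) micro-kernel with min/max
// clamping. The weights matrix is sparse: per output channel, `weights` is
// packed as the channel bias followed by the channel's nonzero weight values;
// `nidx_nnzmap` gives the number of nonzeros per output channel, and each
// `widx_dmap` entry is the byte offset from the input element used by the
// current nonzero to the one used by the next. `mc` is the size of the
// M (batch) dimension in bytes; the kernel processes ${MR} M-dimension
// elements per pass over the output channels.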
void xnn_f32_spmm_minmax_ukernel_${MR}x${NR}__sse${"_x" + str(UNROLL) if UNROLL > 1 else ""}(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const __m128 vmin = _mm_load_ps(params->sse.min);
  const __m128 vmax = _mm_load_ps(params->sse.max);
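  // After a full pass over all nc output channels the output pointer is
  // output_stride * nc bytes past the block start; subtracting
  // output_decrement lands it on the next block of ${MR} elements.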
  size_t output_decrement = output_stride * nc - ${MR} * sizeof(float);
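  // Main loop: process ${MR} M-dimension elements per iteration, across all
  // nc output channels.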
  while XNN_LIKELY(mc >= ${MR} * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      $if UNROLL > 1:
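        // Seed the first accumulator with the channel bias (the first packed
        // weight) and zero the remaining partial accumulators.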
        __m128 vacc0123x0 = _mm_load1_ps(w);
        w += 1;
        $for K in range(1, UNROLL):
          __m128 vacc0123x${K} = _mm_setzero_ps();
        $for M in range(4, MR, 4):
          __m128 vacc${ABC[M:M+4]}x0 = vacc0123x0;
          $for K in range(1, UNROLL):
            __m128 vacc${ABC[M:M+4]}x${K} = _mm_setzero_ps();
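        // Unrolled loop: accumulate ${UNROLL} nonzero weights per iteration,
        // each into its own accumulator chain, to hide addps/mulps latency.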
        for (; nnz >= ${UNROLL}; nnz -= ${UNROLL}) {
          $for K in range(UNROLL):
            const intptr_t diff${K} = dmap[${K}];
          dmap += ${UNROLL};
          $for K in range(UNROLL):
            const __m128 vi0123x${K} = _mm_loadu_ps(input);
            $for M in range(4, MR, 4):
              const __m128 vi${ABC[M:M+4]}x${K} = _mm_loadu_ps(input + ${M});
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff${K});
            const __m128 vw${K} = _mm_load1_ps(w);
            w += 1;
            $for M in range(0, MR, 4):
              vacc${ABC[M:M+4]}x${K} = _mm_add_ps(vacc${ABC[M:M+4]}x${K}, _mm_mul_ps(vi${ABC[M:M+4]}x${K}, vw${K}));
        }
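        // Sum the ${UNROLL} partial accumulators.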
        $for M in range(0, MR, 4):
          __m128 vacc${ABC[M:M+4]} = vacc${ABC[M:M+4]}x0;
        $for K in range(1, UNROLL):
          $for M in range(0, MR, 4):
            vacc${ABC[M:M+4]} = _mm_add_ps(vacc${ABC[M:M+4]}, vacc${ABC[M:M+4]}x${K});
      $else:
        __m128 vacc0123 = _mm_load1_ps(w); w += 1;
        $for M in range(4, MR, 4):
          __m128 vacc${ABC[M:M+4]} = vacc0123;
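      // Accumulate nonzero weights one at a time: load ${MR} input values,
      // step input to the element for the next nonzero, and multiply by the
      // weight broadcast across all lanes.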
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const __m128 vi0123 = _mm_loadu_ps(input);
          $for M in range(4, MR, 4):
            const __m128 vi${ABC[M:M+4]} = _mm_loadu_ps(input + ${M});
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          const __m128 vw = _mm_load1_ps(w); w += 1;
          $for M in range(0, MR, 4):
            vacc${ABC[M:M+4]} = _mm_add_ps(vacc${ABC[M:M+4]}, _mm_mul_ps(vi${ABC[M:M+4]}, vw));
        } while (--nnz != 0);
      }
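      // Clamp the accumulated values to [min, max].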
      $for M in range(0, MR, 4):
        __m128 vout${ABC[M:M+4]} = _mm_min_ps(vacc${ABC[M:M+4]}, vmax);
      $for M in range(0, MR, 4):
        vout${ABC[M:M+4]} = _mm_max_ps(vout${ABC[M:M+4]}, vmin);
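      // Store ${MR} results for this output channel and step to the next one.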
      _mm_storeu_ps(output, vout0123);
      $for M in range(4, MR, 4):
        _mm_storeu_ps(output + ${M}, vout${ABC[M:M+4]});
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
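    // The difference map is constructed so that a full pass returns input to
    // the start of the block; advance both pointers to the next ${MR} elements.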
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += ${MR};
    mc -= ${MR} * sizeof(float);
  }
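  // Handle a remainder of fewer than ${MR} M-dimension elements with
  // progressively smaller power-of-two blocks.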
  if XNN_UNLIKELY(mc != 0) {
    $for LOG2M in reversed(range((MR - 1).bit_length())):
      $SUBMR = 1 << LOG2M
      $if SUBMR * 2 >= MR:
        output_decrement += ${MR - SUBMR} * sizeof(float);
      $else:
        output_decrement += ${SUBMR} * sizeof(float);
      if (mc & (${SUBMR} * sizeof(float))) {
        const float*restrict w = weights;
        const int32_t* dmap = widx_dmap;
        const uint32_t* nnzmap = nidx_nnzmap;
        size_t n = nc;
        do {
          uint32_t nnz = *nnzmap++;
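          // Seed the accumulator with the channel bias.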
          $if SUBMR == 1:
            __m128 vacc0 = _mm_load_ss(w); w += 1;
          $elif SUBMR == 2:
            __m128 vacc01 = _mm_load_ss(w); w += 1;
            vacc01 = _mm_unpacklo_ps(vacc01, vacc01);
          $else:
            __m128 vacc0123 = _mm_load1_ps(w); w += 1;
          $for M in range(4, SUBMR, 4):
            __m128 vacc${ABC[M:M+4]} = vacc0123;
          if XNN_LIKELY(nnz != 0) {
            do {
              const intptr_t diff = *dmap++;
              $if SUBMR >= 4:
                const __m128 vi0123 = _mm_loadu_ps(input);
              $elif SUBMR == 2:
                const __m128 vi01 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) input);
              $elif SUBMR == 1:
                const __m128 vi0 = _mm_load_ss(input);
              $for M in range(4, SUBMR, 4):
                const __m128 vi${ABC[M:M+4]} = _mm_loadu_ps(input + ${M});
              input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
              $if SUBMR >= 4:
                const __m128 vw = _mm_load1_ps(w); w += 1;
              $elif SUBMR == 2:
                __m128 vw = _mm_load_ss(w); w += 1;
                vw = _mm_unpacklo_ps(vw, vw);
              $else:
                const __m128 vw = _mm_load_ss(w); w += 1;
              $if SUBMR == 1:
                vacc${ABC[0]} = _mm_add_ss(vacc${ABC[0]}, _mm_mul_ss(vi${ABC[0]}, vw));
              $else:
                $for M in range(0, SUBMR, 4):
                  vacc${ABC[M:min(M+4,SUBMR)]} = _mm_add_ps(vacc${ABC[M:min(M+4,SUBMR)]}, _mm_mul_ps(vi${ABC[M:min(M+4,SUBMR)]}, vw));
            } while (--nnz != 0);
          }
          $if SUBMR == 1:
            __m128 vout${ABC[0]} = _mm_min_ss(vacc${ABC[0]}, vmax);
            vout${ABC[0]} = _mm_max_ss(vout${ABC[0]}, vmin);
          $else:
            $for M in range(0, SUBMR, 4):
              __m128 vout${ABC[M:min(M+4,SUBMR)]} = _mm_min_ps(vacc${ABC[M:min(M+4,SUBMR)]}, vmax);
            $for M in range(0, SUBMR, 4):
              vout${ABC[M:min(M+4,SUBMR)]} = _mm_max_ps(vout${ABC[M:min(M+4,SUBMR)]}, vmin);
          $if SUBMR >= 4:
            _mm_storeu_ps(output, vout0123);
          $elif SUBMR == 2:
            _mm_storel_pi((__m64*) output, vout01);
          $elif SUBMR == 1:
            _mm_store_ss(output, vout0);
          $for M in range(4, SUBMR, 4):
            _mm_storeu_ps(output + ${M}, vout${ABC[M:M+4]});
          output = (float*restrict) ((uintptr_t) output + output_stride);
        } while (--n != 0);
        output = (float*restrict) ((uintptr_t) output - output_decrement);
        input += ${SUBMR};
      }
  }
}