| // Copyright 2020 Google LLC |
| // |
| // This source code is licensed under the BSD-style license found in the |
| // LICENSE file in the root directory of this source tree. |
| |
| $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" |
| $assert NR % 8 == 0 |
| $assert 8 <= NR <= 16 |
| |
| #include <assert.h> |
| |
| #include <arm_neon.h> |
| |
| #include <xnnpack/gemm.h> |
| |
| |
| // This kernel uses ARMv8.2 dot-product instructions. |
| // |
| // Scalar model: xnn_qs8_gemm_minmax_ukernel_${MR}x${NR}c4__scalar. Refer to |
| // that kernel for more comments. |
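| // |
| // Each vdotq_lane_s32(vacc, vb, va, lane) below maps to an SDOT instruction: |
| // it adds, into every 32-bit lane i of vacc, the dot product of the four |
| // int8 weights vb[4*i..4*i+3] with the four int8 activations in 32-bit lane |
| // `lane` of va. |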
| void xnn_qs8_gemm_minmax_ukernel_${MR}x${NR}c4__neondot( |
| size_t mr, |
| size_t nc, |
| size_t kc, |
| const int8_t* restrict a, |
| size_t a_stride, |
| const void* restrict w, |
| int8_t* restrict c, |
| size_t cm_stride, |
| size_t cn_stride, |
| const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN { |
| assert(mr != 0); |
| assert(mr <= ${MR}); |
| assert(nc != 0); |
| assert(kc != 0); |
| |
| const int8_t* a0 = a; |
| int8_t* c0 = c; |
| $for M in range(1, MR): |
| const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride); |
| int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride); |
| $if M % 2 == 0: |
| if XNN_UNPREDICTABLE(mr <= ${M}) { |
| a${M} = a${M-1}; |
| c${M} = c${M-1}; |
| } |
| $elif M + 1 == MR: |
| if XNN_UNPREDICTABLE(mr != ${M+1}) { |
| a${M} = a${M-1}; |
| c${M} = c${M-1}; |
| } |
| $else: |
| if XNN_UNPREDICTABLE(mr < ${M+1}) { |
| a${M} = a${M-1}; |
| c${M} = c${M-1}; |
| } |
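| // When mr < ${MR}, the unused trailing rows alias the last valid row: they |
| // redo its work and store identical results to the same addresses, which is |
| // redundant but harmless. |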
| |
| // Loop over groups of ${NR} columns. |
| do { |
| // Initialize accumulators with bias. ${NR} bias values are loaded from the |
| // weight matrix, at the start of the group of ${NR} columns. |
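| // The packed weights for each column group begin with these ${NR} bias |
| // values and continue with the int8 weights, so `w` advances linearly. |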
| $for N in range(0, NR, 4): |
| int32x4_t vacc0x${ABC[N:N+4]} = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); |
| $for M in range(1, MR): |
| $for N in range(0, NR, 4): |
| int32x4_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]}; |
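| // All ${MR} rows start from the same per-column bias values. |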
| |
| // Inner accumulation loop along the K dimension for this group of ${NR} columns. |
| size_t k = kc; |
| // 2x partially unrolled loop that loads 8 bytes per row at a time. |
| while (k >= 8 * sizeof(int8_t)) { |
| // Load a ${MR}x8 block of activations. |
| $for M in range(MR): |
| const int8x8_t va${M}x01234567 = vld1_s8(a${M}); a${M} += 8; |
| |
| // Load an 8x${NR} block of weights. |
| $for K in range(0, 8, 4): |
| $for N in range(0, NR, 4): |
| const int8x16_t vb${ABC[K:K+4]}x${ABC[N:N+4]} = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t)); |
| |
| // Multiply-accumulate: ${MR}x8 * 8x${NR} --> ${MR}x${NR}. |
| $for K in range(0, 8, 4): |
| $for M in range(MR): |
| $for N in range(0, NR, 4): |
| vacc${M}x${ABC[N:N+4]} = vdotq_lane_s32(vacc${M}x${ABC[N:N+4]}, vb${ABC[K:K+4]}x${ABC[N:N+4]}, va${M}x01234567, ${K//4}); |
| |
| k -= 8 * sizeof(int8_t); |
| } |
| // Handle up to 7 remaining positions along K. |
| if XNN_UNLIKELY(k != 0) { |
| // Load a ${MR}x8 block of activations; only the first `k` bytes per row are valid. |
| $for M in range(MR): |
| const int8x8_t va${M}x01234567 = vld1_s8(a${M}); a${M} += k; |
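| // (The 8-byte loads may read up to 7 bytes past the `k` valid activations; |
| // this relies on the caller-guaranteed over-read slack, and the surplus |
| // lanes are assumed to be cancelled by zero padding in the packed weights.) |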
| |
| // Load a 4x${NR} block of weights. |
| $for N in range(0, NR, 4): |
| const int8x16_t vb0123x${ABC[N:N+4]} = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t)); |
| |
| // Multiply-accumulate: ${MR}x4 * 4x${NR} --> ${MR}x${NR}. |
| $for M in range(MR): |
| $for N in range(0, NR, 4): |
| vacc${M}x${ABC[N:N+4]} = vdotq_lane_s32(vacc${M}x${ABC[N:N+4]}, vb0123x${ABC[N:N+4]}, va${M}x01234567, 0); |
| |
| if (k > 4) { |
| // Load a 4x${NR} block of weights. |
| $for N in range(0, NR, 4): |
| const int8x16_t vb4567x${ABC[N:N+4]} = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t)); |
| |
| // Multiply-accumulate: ${MR}x4 * 4x${NR} --> ${MR}x${NR}. |
| $for M in range(MR): |
| $for N in range(0, NR, 4): |
| vacc${M}x${ABC[N:N+4]} = vdotq_lane_s32(vacc${M}x${ABC[N:N+4]}, vb4567x${ABC[N:N+4]}, va${M}x01234567, 1); |
| } |
| } |
| // End of accumulation loop. The `a` pointers have advanced by `kc` bytes in |
| // total, so rewind them by that amount to reuse the same activations for the |
| // next group of ${NR} columns. |
| $for M in range(MR): |
| a${M} = (const int8_t*) ((uintptr_t) a${M} - kc); |
| |
| // Post-accumulation work |
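| // Requantize the int32 accumulators to the output scale. Per lane this |
| // effectively computes |
| // acc = round(acc * multiplier / 2**(31 + shift)) |
| // with a positive Q31 multiplier and ties rounded away from zero: VQRDMULH |
| // takes the rounded doubling high half of the product, the VBIC+VSRA pair |
| // subtracts 1 from negative lanes (skipped entirely when shift == 0) so that |
| // the final rounding right shift (VRSHL, which rounds ties upward) matches |
| // ties-away-from-zero rounding. |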
| |
| const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); |
| const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 4): |
| const int32x4_t vproduct${M}x${ABC[N:N+4]} = vqrdmulhq_n_s32(vacc${M}x${ABC[N:N+4]}, params->neon.multiplier); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 4): |
| vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vproduct${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 4): |
| vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift); |
| |
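| // Convert the requantized values to int8: saturating-narrow pairs of int32 |
| // vectors to int16, add the output zero point with int16 saturation, then |
| // saturating-narrow to int8, packing two rows per vector where possible. |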
| const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point); |
| #if XNN_ARCH_ARM64 |
| $for M in range(MR): |
| $for N in range(0, NR, 8): |
| const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 16): |
| $if N + 8 < NR: |
| int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]}); |
| $elif M % 2 == 1: |
| int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]}); |
| $elif M + 1 == MR: |
| int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]}); |
| #else |
| $for M in range(MR): |
| $for N in range(0, NR, 8): |
| const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 16): |
| $if N + 8 < NR: |
| int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]})); |
| $elif M % 2 == 1: |
| int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]})); |
| $elif M + 1 == MR: |
| int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]}); |
| #endif |
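| // Clamp the results to the caller-provided [output_min, output_max] range. |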
| $if NR == 8 and MR == 1: |
| const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min); |
| const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max); |
| $else: |
| const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min); |
| const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 16): |
| $if N + 8 < NR: |
| vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min); |
| $elif M % 2 == 1: |
| vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min); |
| $elif M + 1 == MR: |
| $if NR == 8 and MR == 1: |
| vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min); |
| $else: |
| vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min)); |
| |
| $for M in range(MR): |
| $for N in range(0, NR, 16): |
| $if N + 8 < NR: |
| vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max); |
| $elif M % 2 == 1: |
| vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max); |
| $elif M + 1 == MR: |
| $if NR == 8 and MR == 1: |
| vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max); |
| $else: |
| vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max)); |
| |
| if (nc >= ${NR}) { |
| // Main case where the ${NR} columns fit in the destination. |
| $for M in range(MR): |
| $for N in range(0, NR, 16): |
| $if N + 8 < NR: |
| vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]}); |
| $elif M % 2 == 1: |
| vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]})); |
| vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]})); |
| $elif M + 1 == MR: |
| vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]}); |
| |
| // Advance to the next ${NR} columns. |
| $for M in range(MR): |
| c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride); |
| |
| nc -= ${NR}; |
| } else { |
| // Final case where not all of the ${NR} columns fit in the destination. |
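| // Store the remaining 1..${NR-1} columns using the binary decomposition of |
| // `nc`: store 8, 4, 2, then 1 value(s) whenever the corresponding bit of |
| // `nc` is set, rotating already-stored lanes out of the vectors after each |
| // partial store. |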
| $if NR == 16: |
| $for M in range(MR): |
| $if M % 2 == 1: |
| int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF)); |
| $elif M + 1 == MR: |
| int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF); |
| if (nc & 8) { |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8; |
| vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8; |
| $elif M + 1 == MR: |
| vst1_s8(c${M}, vout${M}x01234567); c${M} += 8; |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF)); |
| $elif M + 1 == MR: |
| vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF); |
| } |
| if (nc & 4) { |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4; |
| vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4; |
| $elif M + 1 == MR: |
| vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4; |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4); |
| $elif M + 1 == MR: |
| vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4); |
| } |
| if (nc & 2) { |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2; |
| vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2; |
| $elif M + 1 == MR: |
| vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2; |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2); |
| $elif M + 1 == MR: |
| vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2); |
| } |
| if (nc & 1) { |
| $for M in range(MR): |
| $if M % 2 == 1: |
| vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0); |
| vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8); |
| $elif M + 1 == MR: |
| vst1_lane_s8(c${M}, vout${M}x01234567, 0); |
| } |
| |
| nc = 0; |
| } |
| } while (nc != 0); |
| } |