// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>

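// This template generates f32 IGEMM (indirect GEMM) microkernels for
// WebAssembly SIMD. Each instantiation computes an MR x NR output tile;
// the "loadsplat" strategy broadcasts one A element across all four lanes
// of a v128 and multiplies it against NR packed B values per K step.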
$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd_loadsplat${ARCH_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

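  // Set up the output row pointers for up to MR rows. Rows past mr are
  // clamped to the previous row, so their stores alias valid memory and
  // write duplicate data instead of running out of bounds.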
  float* c0 = c;
  $for M in range(1, MR):
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

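  // On non-x86 targets the clamping bounds are loaded once, up front;
  // the x86 variant instead loads them in the epilogue, next to the
  // compare-and-bitselect sequence that replaces f32x4.min/max there.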
  $if ACTIVATION == "MINMAX" and not X86:
    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  do {
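    // Initialize the MR x NR accumulator tile from the bias values packed
    // at the start of each column block of w; rows 1..MR-1 copy row 0.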
    v128_t vacc0x${ABC[0:4]} = wasm_v128_load(w);
    $for N in range(4, NR, 4):
      v128_t vacc0x${ABC[N:N+4]} = wasm_v128_load(w + ${N});
    $for M in range(1, MR):
      $for N in range(0, NR, 4):
        v128_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
    w += ${NR};

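    // ks is the size, in bytes, of the indirection buffer slice for one
    // output tile; each pass below consumes MR pointers from it.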
    size_t p = ks;
    do {
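      // Fetch the next MR input row pointers. A pointer equal to `zero`
      // designates the padding row and must not be displaced by a_offset.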
      $for M in range(MR):
        const float* restrict a${M} = a[${M}];
        assert(a${M} != NULL);
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

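      // Inner loop over K: load NR packed B values, broadcast ("loadsplat")
      // one float from each A row, and accumulate their products.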
      size_t k = kc;
      do {
        const v128_t vb${ABC[0:4]} = wasm_v128_load(w);
        $for N in range(4, NR, 4):
          const v128_t vb${ABC[N:N+4]} = wasm_v128_load(w + ${N});
        w += ${NR};

        $for M in range(MR):
          const v128_t va${M} = wasm_v32x4_load_splat(a${M});
          a${M} += 1;

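        // WebAssembly SIMD has no fused multiply-add, so the update is an
        // explicit multiply followed by an add.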
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}, vb${ABC[N:N+4]}));
        k -= sizeof(float);
      } while (k != 0);
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

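    // Activation epilogue. MINMAX clamps the tile to [min, max]; on x86 the
    // clamp uses compare plus bitselect, avoiding f32x4.min/max, which lower
    // to multi-instruction sequences on that architecture.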
    $if ACTIVATION == "MINMAX":
      $if X86:
        const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_v128_bitselect(vmin, vacc${M}x${ABC[N:N+4]}, wasm_f32x4_lt(vacc${M}x${ABC[N:N+4]}, vmin));
      $else:
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_max(vacc${M}x${ABC[N:N+4]}, vmin);

      $if X86:
        const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_v128_bitselect(vacc${M}x${ABC[N:N+4]}, vmax, wasm_f32x4_le(vacc${M}x${ABC[N:N+4]}, vmax));
      $else:
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_min(vacc${M}x${ABC[N:N+4]}, vmax);
    $elif ACTIVATION == "RELU":
      const v128_t vzero = wasm_f32x4_splat(0.0f);
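      // Signed integer max implements RELU on IEEE-754 floats: +0.0f is the
      // all-zero bit pattern, and negative floats have the sign bit set, so
      // they compare below zero as signed 32-bit integers.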
      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = wasm_i32x4_max(vacc${M}x${ABC[N:N+4]}, vzero);

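    // Common case: store a full MR x NR tile, advance the column pointers
    // by cn_stride, and rewind a by ks so the same indirection entries are
    // reused for the next NR output columns.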
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(MR)):
        wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
        $for N in range(4, NR, 4):
          wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= ${NR};
    } else {
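      // Remainder: nc < NR columns are stored by decomposing nc into powers
      // of two; after each partial store the surviving accumulator lanes are
      // shifted down so the next store reads from lane 0.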
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 2:
              $for M in reversed(range(MR)):
                wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
                $for N in range(4, 1 << LOG2N, 4):
                  wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 4):
                  vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}x${ABC[0:4]}, 0);

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = wasm_v32x4_shuffle(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]}, 2, 3, 2, 3);

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                *c${M} = wasm_f32x4_extract_lane(vacc${M}x${ABC[0:4]}, 0);
          }

      nc = 0;
    }
  } while (nc != 0);
}