// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert not ADD16 or DATATYPE != "QU8"
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert KERNEL_TILE >= 2
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
$PARAMS_STRUCT = ("" if DATATYPE == "QC8" else REQUANTIZATION.lower() + "_") + "wasmsimd"
$PARAMS_UNION = "xnn_qs8_minmax_params" if DATATYPE == "QC8" else "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$WASM_X16X8_LOAD8X8 = "wasm_u16x8_load8x8" if DATATYPE == "QU8" else "wasm_i16x8_load8x8"
$WASM_X32X4_EXTEND_LOW_X16X8 = "wasm_u32x4_extend_low_u16x8" if DATATYPE == "QU8" else "wasm_i32x4_extend_low_i16x8"
$WASM_X32X4_EXTEND_HIGH_X16X8 = "wasm_u32x4_extend_high_u16x8" if DATATYPE == "QU8" else "wasm_i32x4_extend_high_i16x8"
$WASM_X8X16_NARROW_I16X8 = "wasm_u8x16_narrow_i16x8" if DATATYPE == "QU8" else "wasm_i8x16_narrow_i16x8"
$WASM_X8X16_MIN = "wasm_u8x16_min" if DATATYPE == "QU8" else "wasm_i8x16_min"
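// Depthwise-convolution micro-kernel (wasmsimd, mul16 variant): for each output pixel it
// accumulates KERNEL_TILE taps over tiles of CHANNEL_TILE channels, widening the 8-bit
// inputs and weights to 16 bits, multiplying, and summing the extended 32-bit products,
// then applies FP32 requantization with the magic-bias rounding trick.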
void xnn_${DATATYPE.lower()}_dwconv_minmax_${REQUANTIZATION.lower()}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__wasmsimd_mul16${"_add16" if ADD16 else ""}(
size_t channels,
size_t output_width,
const ${XINT8_T}** input,
const void* weights,
${XINT8_T}* output,
size_t input_stride,
size_t output_increment,
size_t input_offset,
const ${XINT8_T}* zero,
const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
$if DATATYPE == "QU8":
const v128_t vkernel_zero_point = wasm_u32x4_load16x4(params->${PARAMS_STRUCT}.kernel_zero_point);
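// Outer loop: one iteration per output pixel.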
do {
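// Re-base the KERNEL_TILE input row pointers; rows that point at the zero buffer
// (padding) are left as-is.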
$for K in range(KERNEL_TILE):
const ${XINT8_T}* i${K} = input[${K}];
assert(i${K} != NULL);
if XNN_UNPREDICTABLE(i${K} != zero) {
i${K} = (const ${XINT8_T}*) ((uintptr_t) i${K} + input_offset);
}
input = (const ${XINT8_T}**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
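// Main loop: process CHANNEL_TILE channels per iteration, starting the accumulators
// from the per-channel bias stored at the beginning of the packed weights.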
for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
v128_t vacc${ABC[0:4]} = wasm_v128_load(w);
$for C in range(4, CHANNEL_TILE, 4):
v128_t vacc${ABC[C:C+4]} = wasm_v128_load((const void*) ((uintptr_t) w + ${C} * sizeof(int32_t)));
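// Accumulate all KERNEL_TILE taps: inputs and packed weights are loaded as 16-bit lanes,
// multiplied, and the extended 32-bit products are added to the accumulators. The _add16
// variant sums pairs of 16-bit products ((0,1), (2,3), ...) before widening; for QU8 a
// running sum of the inputs is also kept for the kernel-zero-point correction below.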
$for K in range(KERNEL_TILE):
$for C in range(0, CHANNEL_TILE, 8):
$if C == 0:
const v128_t vi${K}x${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(i${K});
$else:
const v128_t vi${K}x${ABC[C:C+8]} = ${WASM_X16X8_LOAD8X8}(i${K} + ${C});
const v128_t vk${K}x${ABC[C:C+8]} = ${WASM_X16X8_LOAD8X8}((const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(${XINT8_T})));
$if DATATYPE == "QU8":
$for C in range(0, CHANNEL_TILE, 8):
$if K == 1:
v128_t vsumx${ABC[C:C+8]} = wasm_i16x8_add(vi0x${ABC[C:C+8]}, vi1x${ABC[C:C+8]});
$elif K > 1:
vsumx${ABC[C:C+8]} = wasm_i16x8_add(vsumx${ABC[C:C+8]}, vi${K}x${ABC[C:C+8]});
i${K} += ${CHANNEL_TILE};
$for C in range(0, CHANNEL_TILE, 8):
$if K == 0:
v128_t vprod${ABC[C:C+8]} = wasm_i16x8_mul(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]});
$elif K % 2 == 0 or not ADD16:
vprod${ABC[C:C+8]} = wasm_i16x8_mul(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]});
$else:
vprod${ABC[C:C+8]} = wasm_i16x8_add(vprod${ABC[C:C+8]}, wasm_i16x8_mul(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]}));
$if not ADD16 or K % 2 == 1 or K + 1 == KERNEL_TILE:
$for C in range(0, CHANNEL_TILE, 8):
vacc${ABC[C:C+4]} = wasm_i32x4_add(vacc${ABC[C:C+4]}, ${WASM_X32X4_EXTEND_LOW_X16X8}(vprod${ABC[C:C+8]}));
vacc${ABC[C+4:C+8]} = wasm_i32x4_add(vacc${ABC[C+4:C+8]}, ${WASM_X32X4_EXTEND_HIGH_X16X8}(vprod${ABC[C:C+8]}));
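// QU8: subtract kernel_zero_point * (sum of all inputs) so the accumulators match a
// convolution with zero-point-adjusted (signed) weights.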
$if DATATYPE == "QU8":
$for C in range(0, CHANNEL_TILE, 8):
vacc${ABC[C:C+4]} = wasm_i32x4_sub(vacc${ABC[C:C+4]}, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vsumx${ABC[C:C+8]}), vkernel_zero_point));
vacc${ABC[C+4:C+8]} = wasm_i32x4_sub(vacc${ABC[C+4:C+8]}, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vsumx${ABC[C:C+8]}), vkernel_zero_point));
w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(${XINT8_T}));
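// FP32 requantization: convert the accumulators to float and scale them (per channel for
// QC8, a single scale otherwise); rounding, the lower clamp, and the output zero point are
// folded into the magic_bias add / magic_min max / magic_bias_less_output_zero_point
// subtract sequence below.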
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = wasm_f32x4_convert_i32x4(vacc${ABC[C:C+4]});
$if DATATYPE == "QC8":
const v128_t vscale${ABC[0:4]} = wasm_v128_load(w);
$for C in range(4, CHANNEL_TILE, 4):
const v128_t vscale${ABC[C:C+4]} = wasm_v128_load((const float*) w + ${C});
w = (const void*) ((const float*) w + ${CHANNEL_TILE});
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = wasm_f32x4_mul(vacc${ABC[C:C+4]}, vscale${ABC[C:C+4]});
$else:
const v128_t vscale = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.scale);
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = wasm_f32x4_mul(vacc${ABC[C:C+4]}, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias);
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = wasm_f32x4_add(vacc${ABC[C:C+4]}, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_min);
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = wasm_i32x4_max(vacc${ABC[C:C+4]}, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = wasm_i32x4_sub(vacc${ABC[C:C+4]}, vmagic_bias_less_output_zero_point);
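// Narrow the 32-bit results to 16 bits and then to 8 bits, and apply the output_max clamp.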
$for C in range(0, CHANNEL_TILE, 8):
v128_t vout${ABC[C:C+8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]});
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
v128_t vout${ABC[C:C+16]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[C:C+8]}, vout${ABC[C+8:C+16]});
$else:
v128_t vout${ABC[C:C+8]}${ABC[C:C+8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});
const v128_t voutput_max = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.output_max);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
vout${ABC[C:C+16]} = ${WASM_X8X16_MIN}(vout${ABC[C:C+16]}, voutput_max);
$else:
vout${ABC[C:C+8]}${ABC[C:C+8]} = ${WASM_X8X16_MIN}(vout${ABC[C:C+8]}${ABC[C:C+8]}, voutput_max);
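// Store CHANNEL_TILE outputs: full 128-bit stores, with a 64-bit (double-lane) store for
// a trailing group of 8.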
$if CHANNEL_TILE > 8:
wasm_v128_store(output, vout${ABC[0:16]});
$else:
*((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
$for C in range(16, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
wasm_v128_store(output + ${C}, vout${ABC[C:C+16]});
$else:
*((double*) (output + ${C})) = wasm_f64x2_extract_lane(vout${ABC[C:C+8]}${ABC[C:C+8]}, 0);
output += ${CHANNEL_TILE};
}
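// Remainder: handle the last 1 to CHANNEL_TILE-1 channels, 8 at a time when CHANNEL_TILE > 8.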
if XNN_UNLIKELY(c != 0) {
$if CHANNEL_TILE > 8:
const ${XINT8_T}* k = (const ${XINT8_T}*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t));
${"do " if CHANNEL_TILE > 8 else ""}{
v128_t vacc${ABC[0:4]} = wasm_v128_load(w);
v128_t vacc${ABC[4:8]} = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));
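// Same tap accumulation as the main loop, restricted to a single group of 8 channels.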
$for K in range(KERNEL_TILE):
const v128_t vi${K}x${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(i${K});
$if CHANNEL_TILE > 8:
$if K == 0:
const v128_t vk${K}x${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(k);
$else:
const v128_t vk${K}x${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}((const void*) (k + ${K * CHANNEL_TILE}));
$else:
const v128_t vk${K}x${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}((const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(${XINT8_T})));
$if DATATYPE == "QU8":
$if K == 1:
v128_t vsumx${ABC[0:8]} = wasm_i16x8_add(vi0x${ABC[0:8]}, vi1x${ABC[0:8]});
$elif K > 1:
vsumx${ABC[0:8]} = wasm_i16x8_add(vsumx${ABC[0:8]}, vi${K}x${ABC[0:8]});
$if CHANNEL_TILE > 8:
i${K} += 8;
$if K == 0:
v128_t vprod${ABC[0:8]} = wasm_i16x8_mul(vi${K}x${ABC[0:8]}, vk${K}x${ABC[0:8]});
$elif K % 2 == 0 or not ADD16:
vprod${ABC[0:8]} = wasm_i16x8_mul(vi${K}x${ABC[0:8]}, vk${K}x${ABC[0:8]});
$else:
vprod${ABC[0:8]} = wasm_i16x8_add(vprod${ABC[0:8]}, wasm_i16x8_mul(vi${K}x${ABC[0:8]}, vk${K}x${ABC[0:8]}));
$if not ADD16 or K % 2 == 1 or K + 1 == KERNEL_TILE:
vacc${ABC[0:4]} = wasm_i32x4_add(vacc${ABC[0:4]}, ${WASM_X32X4_EXTEND_LOW_X16X8}(vprod${ABC[0:8]}));
vacc${ABC[4:8]} = wasm_i32x4_add(vacc${ABC[4:8]}, ${WASM_X32X4_EXTEND_HIGH_X16X8}(vprod${ABC[0:8]}));
$if CHANNEL_TILE > 8:
k += 8;
$if DATATYPE == "QU8":
vacc${ABC[0:4]} = wasm_i32x4_sub(vacc${ABC[0:4]}, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vsumx${ABC[0:8]}), vkernel_zero_point));
vacc${ABC[4:8]} = wasm_i32x4_sub(vacc${ABC[4:8]}, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vsumx${ABC[0:8]}), vkernel_zero_point));
vacc${ABC[0:4]} = wasm_f32x4_convert_i32x4(vacc${ABC[0:4]});
vacc${ABC[4:8]} = wasm_f32x4_convert_i32x4(vacc${ABC[4:8]});
$if DATATYPE == "QC8":
const v128_t vscale${ABC[0:4]} = wasm_v128_load((const float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(${XINT8_T})));
const v128_t vscale${ABC[4:8]} = wasm_v128_load((const float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(${XINT8_T}) + 4 * sizeof(float)));
vacc${ABC[0:4]} = wasm_f32x4_mul(vacc${ABC[0:4]}, vscale${ABC[0:4]});
vacc${ABC[4:8]} = wasm_f32x4_mul(vacc${ABC[4:8]}, vscale${ABC[4:8]});
$else:
const v128_t vscale = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.scale);
vacc${ABC[0:4]} = wasm_f32x4_mul(vacc${ABC[0:4]}, vscale);
vacc${ABC[4:8]} = wasm_f32x4_mul(vacc${ABC[4:8]}, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias);
vacc${ABC[0:4]} = wasm_f32x4_add(vacc${ABC[0:4]}, vmagic_bias);
vacc${ABC[4:8]} = wasm_f32x4_add(vacc${ABC[4:8]}, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_min);
vacc${ABC[0:4]} = wasm_i32x4_max(vacc${ABC[0:4]}, vmagic_min);
vacc${ABC[4:8]} = wasm_i32x4_max(vacc${ABC[4:8]}, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
vacc${ABC[0:4]} = wasm_i32x4_sub(vacc${ABC[0:4]}, vmagic_bias_less_output_zero_point);
vacc${ABC[4:8]} = wasm_i32x4_sub(vacc${ABC[4:8]}, vmagic_bias_less_output_zero_point);
v128_t vout${ABC[0:8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[0:4]}, vacc${ABC[4:8]});
v128_t vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[0:8]}, vout${ABC[0:8]});
const v128_t voutput_max = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.output_max);
vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MIN}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
$if CHANNEL_TILE > 8:
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
$if CHANNEL_TILE > 8:
if XNN_LIKELY(c >= 8) {
*((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
output += 8;
c -= 8;
} else {
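// Write the last 1-7 outputs 4, 2, and 1 bytes at a time, shifting the packed vector
// down as lanes are consumed.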
if (c & 4) {
*((float*) output) = wasm_f32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
output += 4;
}
uint32_t vout${ABC[0:4]} = wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
if (c & 2) {
*((uint16_t*) output) = (uint16_t) vout${ABC[0:4]};
vout${ABC[0:4]} >>= 16;
output += 2;
}
if (c & 1) {
*output = (${XINT8_T}) vout${ABC[0:4]};
output += 1;
}
c = 0;
}
$else:
if (c & 4) {
*((float*) output) = wasm_f32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
output += 4;
}
uint32_t vout${ABC[0:4]} = wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
if (c & 2) {
*((uint16_t*) output) = (uint16_t) vout${ABC[0:4]};
vout${ABC[0:4]} >>= 16;
output += 2;
}
if (c & 1) {
*output = (${XINT8_T}) vout${ABC[0:4]};
output += 1;
}
}${" while (c != 0);" if CHANNEL_TILE > 8 else ""}
}
output = (${XINT8_T}*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}