// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
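
// Template for the x8-lut SSSE3/AVX microkernels. The generated kernel maps
// each input byte through a 256-entry lookup table, i.e. it is a vectorized
// form of the scalar loop below (an illustrative sketch, not generated code):
//
//   for (size_t i = 0; i < n; i++) {
//     y[i] = t[x[i]];
//   }
//
// PSHUFB can only index 16-entry tables, so the kernel splits the 256-entry
// table into sixteen slices and combines per-slice lookups with XOR.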
$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

$if AVX:
  #include <immintrin.h>
$else:
  #include <tmmintrin.h>

#include <xnnpack/lut.h>
#include <xnnpack/common.h>
void xnn_x8_lut_ukernel__${"avx" if AVX else "ssse3"}_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);
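
  // Load the 256-byte lookup table as sixteen 16-byte slices vt0..vtF.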
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  $for T in range(1, 16):
    const __m128i vt${ABC[T]} = _mm_load_si128((const __m128i*) (t + ${T * 16}));
  const __m128i vtable0 = vt0;
  $for T in range(1, 8):
    const __m128i vtable${ABC[T]} = _mm_xor_si128(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m128i vtable${ABC[T]} = _mm_xor_si128(_mm_xor_si128(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});
  const __m128i voffset = _mm_set1_epi8(16);
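
  // Main loop: translate BATCH_TILE input bytes (SIMD_TILE 16-byte vectors)
  // per iteration.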
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
      __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
      $for N in range(1, SIMD_TILE):
        __m128i vx${N} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
      x += ${BATCH_TILE};
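
      // First lookup: PSHUFB indexes only the low 4 bits (lanes with the high
      // bit set return 0), so every lane in 0..127 reads vtable0[index & 15];
      // the XOR-delta tables cancel the surplus terms step by step.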
      $for N in range(SIMD_TILE):
        __m128i vy${N} = _mm_shuffle_epi8(vtable0, vx${N});
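
      // Tables 1..8: a wrapping subtract moves every index down one slice per
      // step; indices 128..255, negative so far, wrap into PSHUFB range once
      // their slice is reached.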
      $for T in range(1, 9):
        $for N in range(SIMD_TILE):
          vx${N} = _mm_sub_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm_xor_si128(vy${N}, _mm_shuffle_epi8(vtable${ABC[T]}, vx${N}));
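
      // Tables 9..15: switch to a saturating subtract so lanes that already
      // produced their byte stay negative (pinned at -128) instead of wrapping
      // back into PSHUFB range and XOR-ing in spurious values.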
      $for T in range(9, 16):
        $for N in range(SIMD_TILE):
          vx${N} = _mm_subs_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm_xor_si128(vy${N}, _mm_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      _mm_storeu_si128((__m128i*) y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${N});
      y += ${BATCH_TILE};
    }
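
  // Remainder loop: one 16-byte vector at a time, same table schedule as above.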
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);
    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));
    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
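
  // Tail: handle the last 1..15 bytes. Note the full 16-byte load below: the
  // kernel assumes it may read up to 16 bytes from x even when fewer remain,
  // so callers are expected to guarantee that this read stays in bounds
  // (an assumption about the calling convention, not enforced here).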
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);
    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));
    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));
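
    // Write the remaining 1..15 bytes with progressively narrower stores,
    // shifting consumed lanes out of vy after each partial store.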
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      $if AVX:
        _mm_storeu_si32(y, vy);
      $else:
        *((uint32_t*) y) = (uint32_t) _mm_cvtsi128_si32(vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
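
    // AVX implies SSE4.1, so the AVX variant can use _mm_extract_epi8 for the
    // final byte; the SSSE3 variant moves the low 32 bits to a scalar register
    // and finishes with scalar shifts instead.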
    $if AVX:
      if (n & (2 * sizeof(uint8_t))) {
        *((uint16_t*) y) = (uint16_t) _mm_extract_epi16(vy, 0);
        vy = _mm_srli_epi32(vy, 16);
        y += 2;
      }
      if (n & (1 * sizeof(uint8_t))) {
        *y = (uint8_t) _mm_extract_epi8(vy, 0);
      }
    $else:
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (n & (2 * sizeof(uint8_t))) {
        *((uint16_t*) y) = (uint16_t) vy_lo;
        vy_lo >>= 16;
        y += 2;
      }
      if (n & (1 * sizeof(uint8_t))) {
        *y = (uint8_t) vy_lo;
      }
  }
}