// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMN"
#include <assert.h>
$if BLEND:
#include <smmintrin.h>
$else:
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__${"sse41" if BLEND else "sse2"}_${ROW_TILE}x${CHANNEL_TILE}(
size_t rows,
size_t channels,
const float*restrict input,
size_t input_stride,
const float*restrict weights,
float*restrict output,
size_t output_stride,
const union xnn_f32_output_params params[restrict static 1])
{
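// Note: `channels` is given in bytes (the channel loop counter below is a byte count).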
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
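// Set up one input and one output pointer per row of the tile. Pointers for
// rows beyond the remaining row count alias the preceding row, so the full
// tile can always be processed without out-of-bounds accesses.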
const float* i0 = input;
float* o0 = output;
$for M in range(1, ROW_TILE):
const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);
$if M % 2 == 0:
if XNN_UNPREDICTABLE(rows <= ${M}) {
i${M} = i${M-1};
o${M} = o${M-1};
}
$else:
if XNN_UNPREDICTABLE(rows < ${M+1}) {
i${M} = i${M-1};
o${M} = o${M-1};
}
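// After a row's `channels` bytes have been consumed, these increments move
// each pointer forward to the corresponding row of the next row group.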
const size_t input_increment = input_stride * ${ROW_TILE} - channels;
const size_t output_increment = output_stride * ${ROW_TILE} - channels;
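// Output clamping bounds; params->sse.min/max hold the bound duplicated across four lanes.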
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
do {
const float* w = weights;
size_t c = channels;
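// Main loop: process CHANNEL_TILE channels of every row in the group per iteration.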
for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
const __m128 vw${ABC[0:4]} = _mm_load_ps(w);
$for C in range(4, CHANNEL_TILE, 4):
const __m128 vw${ABC[C:C+4]} = _mm_load_ps(w + ${C});
w += ${CHANNEL_TILE};
$for M in range(ROW_TILE):
const __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
$for C in range(4, CHANNEL_TILE, 4):
const __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
i${M} += ${CHANNEL_TILE};
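// Multiply the inputs by the per-channel slopes. The SSE2 variant also builds
// a mask of the lanes whose inputs have the sign bit set (negative inputs).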
$for M in range(ROW_TILE):
$for C in range(0, CHANNEL_TILE, 4):
const __m128 vprod${M}x${ABC[C:C+4]} = _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]});
$if not BLEND:
const __m128 vmask${M}x${ABC[C:C+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x${ABC[C:C+4]})));
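// PReLU selection: keep the input where it is non-negative, otherwise take
// input * slope. SSE4.1 blends on the input's sign bit; SSE2 selects with the
// precomputed mask via AND/ANDNOT/OR.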
$for M in range(ROW_TILE):
$for C in range(0, CHANNEL_TILE, 4):
$if BLEND:
__m128 vacc${M}x${ABC[C:C+4]} = _mm_blendv_ps(vi${M}x${ABC[C:C+4]}, vprod${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]});
$else:
__m128 vacc${M}x${ABC[C:C+4]} = _mm_or_ps(_mm_and_ps(vprod${M}x${ABC[C:C+4]}, vmask${M}x${ABC[C:C+4]}), _mm_andnot_ps(vmask${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]}));
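// Clamp the results to the [min, max] output range.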
$for M in range(ROW_TILE):
$for C in range(0, CHANNEL_TILE, 4):
vacc${M}x${ABC[C:C+4]} = _mm_max_ps(vacc${M}x${ABC[C:C+4]}, vmin);
$for M in range(ROW_TILE):
$for C in range(0, CHANNEL_TILE, 4):
vacc${M}x${ABC[C:C+4]} = _mm_min_ps(vacc${M}x${ABC[C:C+4]}, vmax);
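// Store the results and advance the output pointers.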
$for M in range(ROW_TILE):
_mm_storeu_ps(o${M}, vacc${M}x${ABC[0:4]});
$for C in range(4, CHANNEL_TILE, 4):
_mm_storeu_ps(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
o${M} += ${CHANNEL_TILE};
}
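// Remainder loop: process any full groups of 4 channels left over from the
// main loop (generated only when CHANNEL_TILE > 4).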
$if CHANNEL_TILE > 4:
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
$for M in range(ROW_TILE):
const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
i${M} += 4;
$for M in range(ROW_TILE):
const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
$if not BLEND:
const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));
$for M in range(ROW_TILE):
$if BLEND:
__m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);
$else:
__m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
$for M in range(ROW_TILE):
vacc${M}x0123 = _mm_max_ps(vacc${M}x0123, vmin);
$for M in range(ROW_TILE):
vacc${M}x0123 = _mm_min_ps(vacc${M}x0123, vmax);
$for M in range(ROW_TILE):
_mm_storeu_ps(o${M}, vacc${M}x0123);
o${M} += 4;
}
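// Tail: 1-3 channels remain. Compute a full vector and store only the valid
// lanes; the pointers advance by exactly the remaining byte count.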
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
$for M in range(ROW_TILE):
const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
i${M} = (const float*) ((uintptr_t) i${M} + c);
$for M in range(ROW_TILE):
const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
$if not BLEND:
const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));
$for M in range(ROW_TILE):
$if BLEND:
__m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);
$else:
__m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
$for M in range(ROW_TILE):
vacc${M}x0123 = _mm_max_ps(vacc${M}x0123, vmin);
$for M in range(ROW_TILE):
vacc${M}x0123 = _mm_min_ps(vacc${M}x0123, vmax);
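// Store two lanes, then one lane, as indicated by the remaining byte count.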
if (c & (2 * sizeof(float))) {
$for M in range(ROW_TILE):
_mm_storel_pi((__m64*) o${M}, vacc${M}x0123);
$for M in range(ROW_TILE):
vacc${M}x0123 = _mm_movehl_ps(vacc${M}x0123, vacc${M}x0123);
$for M in range(ROW_TILE):
o${M} += 2;
}
if (c & (1 * sizeof(float))) {
$for M in range(ROW_TILE):
_mm_store_ss(o${M}, vacc${M}x0123);
$for M in range(ROW_TILE):
o${M} += 1;
}
}
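// Advance all pointers to the next group of rows; pointers that would run past
// the last row again alias the preceding row. doz() decrements the row count,
// saturating at zero.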
$for M in range(ROW_TILE):
i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
o${M} = (float*) ((uintptr_t) o${M} + output_increment);
$if M % 2 == 1:
if XNN_UNPREDICTABLE(rows < ${ROW_TILE + M+1}) {
i${M} = i${M-1};
o${M} = o${M-1};
}
$elif M != 0:
if XNN_UNPREDICTABLE(rows <= ${ROW_TILE + M}) {
i${M} = i${M-1};
o${M} = o${M-1};
}
rows = doz(rows, ${ROW_TILE});
} while (rows != 0);
}