RS: Add VP9 LoopFilter Intrinsic
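
A rough usage sketch of the new C++ API (allocation setup and the
decoder-side mask/threshold generation are elided; the local variable
names are illustrative only):

    sp<ScriptIntrinsicVP9LoopFilter> lf =
            ScriptIntrinsicVP9LoopFilter::create(mRS, Element::U8(mRS));
    lf->setLoopFilterDomain(0, miRows, numPlanes, miRows, miCols);
    lf->setBufferInfo(&bufInfo);           // frame offsets and strides
    lf->setLoopFilterInfo(lfInfoAlloc);    // holds a LoopFilterInfoN
    lf->setLoopFilterMasks(lfMasksAlloc);  // one LoopFilterMask per 64x64 SB
    lf->forEach(frameAlloc);               // filters the frame in place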
Change-Id: If1ac77774c74b5513ce7a2db4ef31888a351a9c5
diff --git a/cpp/ScriptIntrinsics.cpp b/cpp/ScriptIntrinsics.cpp
index f9a1d97..34b2162 100644
--- a/cpp/ScriptIntrinsics.cpp
+++ b/cpp/ScriptIntrinsics.cpp
@@ -644,3 +644,48 @@
Script::forEach(0, NULL, out, NULL, 0);
}
+
+sp<ScriptIntrinsicVP9LoopFilter> ScriptIntrinsicVP9LoopFilter::create(sp<RS> rs, sp<const Element> e) {
+ if (!(e->isCompatible(Element::U8(rs)))) {
+ rs->throwError(RS_ERROR_INVALID_ELEMENT, "Invalid element for Vp9LoopFilter");
+ return NULL;
+ }
+ return new ScriptIntrinsicVP9LoopFilter(rs, e);
+}
+
+ScriptIntrinsicVP9LoopFilter::ScriptIntrinsicVP9LoopFilter(sp<RS> rs, sp<const Element> e)
+ : ScriptIntrinsic(rs, RS_SCRIPT_INTRINSIC_ID_LOOP_FILTER, e) {
+ sp<const Type> t_pad = Type::create(rs, e, 1, 0, 0);
+ mPadAlloc = Allocation::createTyped(rs, t_pad, RS_ALLOCATION_MIPMAP_NONE, RS_ALLOCATION_USAGE_SCRIPT, NULL);
+}
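+
+// Note: mPadAlloc is a single-element dummy allocation. forEach() below
+// launches over it so the kernel body runs exactly once per forEach() call;
+// the real per-row work is fanned out to worker threads inside the CPU
+// implementation.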
+
+void ScriptIntrinsicVP9LoopFilter::setLoopFilterDomain(int start, int stop, int numPlanes, int miRows, int miCols) {
+ FieldPacker fp(20);
+ fp.add(start);
+ fp.add(stop);
+ fp.add(numPlanes);
+ fp.add(miRows);
+ fp.add(miCols);
+ Script::setVar(0, fp.getData(), fp.getLength());
+}
+
+void ScriptIntrinsicVP9LoopFilter::setBufferInfo(const BufferInfo *bufInfo) {
+ Script::setVar(1, bufInfo, sizeof(BufferInfo));
+}
+
+void ScriptIntrinsicVP9LoopFilter::setLoopFilterInfo(sp<Allocation> lfInfo) {
+ Script::setVar(2, lfInfo);
+}
+
+void ScriptIntrinsicVP9LoopFilter::setLoopFilterMasks(sp<Allocation> lfMasks) {
+ Script::setVar(3, lfMasks);
+}
+
+void ScriptIntrinsicVP9LoopFilter::forEach(sp<Allocation> frameBuffer) {
+ if (!(frameBuffer->getType()->getElement()->isCompatible(mElement))) {
+ mRS->throwError(RS_ERROR_INVALID_ELEMENT, "Invalid element for input/output in Vp9LoopFilter");
+ return;
+ }
+ Script::setVar(4, frameBuffer);
+ Script::forEach(0, mPadAlloc, NULL, NULL, 0);
+}
diff --git a/cpp/rsCppStructs.h b/cpp/rsCppStructs.h
index 805f072..8268b61 100644
--- a/cpp/rsCppStructs.h
+++ b/cpp/rsCppStructs.h
@@ -1883,6 +1883,134 @@
};
/**
+ * Alignment and size macros shared by the VP9 loop filter intrinsic below.
+ */
+#if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C)
+#define DECLARE_ALIGNED(n,typ,val) typ val __attribute__ ((aligned (n)))
+#elif defined(_MSC_VER)
+#define DECLARE_ALIGNED(n,typ,val) __declspec(align(n)) typ val
+#else
+#warning No alignment directives known for this compiler.
+#define DECLARE_ALIGNED(n,typ,val) typ val
+#endif
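+
+// For illustration: with GCC/Clang, DECLARE_ALIGNED(16, uint8_t, buf[16])
+// expands to "uint8_t buf[16] __attribute__ ((aligned (16)))", i.e. a
+// 16-byte array placed on a 16-byte boundary so it can be read with a
+// single aligned vector load.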
+
+#define TX_SIZES 4
+#define SIMD_WIDTH 16
+#define MAX_LOOP_FILTER 63
+#define MAX_SEGMENTS 8
+#define MAX_REF_FRAMES 4
+#define MAX_MODE_LF_DELTAS 2
+#define MB_MODE_COUNT 14
+
+/**
+ * Intrinsic for VP9 loop filter
+ */
+class ScriptIntrinsicVP9LoopFilter : public ScriptIntrinsic {
+ private:
+ ScriptIntrinsicVP9LoopFilter(sp<RS> rs, sp<const Element> e);
+ sp<Allocation> mPadAlloc;
+
+ public:
+ // This structure holds bit masks for all 8x8 blocks in a 64x64 region.
+ // Each 1 bit represents a position in which we want to apply the loop filter.
+ // Left_ entries refer to whether we apply a filter on the border to the
+ // left of the block. Above_ entries refer to whether or not to apply a
+ // filter on the above border. Int_ entries refer to whether or not to
+ // apply filters on the 4x4 edges within the 8x8 block that each bit
+ // represents.
+ // Since each transform is accompanied by a potentially different type of
+ // loop filter there is a different entry in the array for each transform size.
+ struct LoopFilterMask {
+ uint64_t left_y[TX_SIZES];
+ uint64_t above_y[TX_SIZES];
+ uint64_t int_4x4_y;
+ uint16_t left_uv[TX_SIZES];
+ uint16_t above_uv[TX_SIZES];
+ uint16_t int_4x4_uv;
+ uint8_t lfl_y[64];
+ uint8_t lfl_uv[16];
+ };
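+ // For illustration: bit (r * 8 + c) of left_y[TX_8X8] covers the 8x8
+ // block at row r, column c of the 64x64 superblock, so a mask of
+ // 0x0101010101010101ULL would filter the left edge of every block in
+ // column 0. The 16-bit *_uv masks pack 4 bits per row for the
+ // half-resolution chroma planes.
+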
+ // Need to align this structure so when it is declared and
+ // passed it can be loaded into vector registers.
+ struct LoopFilterThresh {
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, mblim[SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, lim[SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, hev_thr[SIMD_WIDTH]);
+ };
+ struct LoopFilterInfoN {
+ LoopFilterThresh lfthr[MAX_LOOP_FILTER + 1];
+ uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS];
+ uint8_t mode_lf_lut[MB_MODE_COUNT];
+ };
+ struct BufferInfo {
+ int y_offset;
+ int u_offset;
+ int v_offset;
+ int y_stride;
+ int uv_stride;
+ };
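+ // For illustration, one plausible layout for a 176x144 planar YUV 4:2:0
+ // frame with no border: y_offset = 0, y_stride = 176,
+ // u_offset = 176 * 144, v_offset = u_offset + 88 * 72, uv_stride = 88.
+ // The actual values are supplied by the decoder; these numbers are an
+ // assumption for the example only.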
+
+ /**
+ * Create an intrinsic for LoopFilter.
+ *
+ * The only supported element type is U8.
+ *
+ * @param[in] rs The RenderScript context
+ * @param[in] e Element type for output
+ *
+ * @return ScriptIntrinsicVP9LoopFilter
+ */
+ static sp<ScriptIntrinsicVP9LoopFilter> create(sp<RS> rs, sp<const Element> e);
+ /**
+ * Set loop filter domain.
+ *
+ * @param[in] start The start mi (mode info) row
+ * @param[in] stop The stop mi row
+ * @param[in] numPlanes The number of planes
+ * @param[in] miRows The number of mi rows
+ * @param[in] miCols The number of mi columns
+ */
+ void setLoopFilterDomain(int start, int stop, int numPlanes, int miRows, int miCols);
+ /**
+ * Set the layout info of the frame buffer (the parameter passed to forEach).
+ *
+ * @param[in] bufInfo Pointer to a BufferInfo struct describing the frame layout
+ */
+ void setBufferInfo(const BufferInfo *bufInfo);
+ /**
+ * Set the loop filter info, including information such as high edge variance
+ * thresholds and loop filter levels that apply to the whole frame.
+ *
+ * @param[in] lfInfo The Allocation that contains the LoopFilterInfoN object
+ */
+ void setLoopFilterInfo(sp<Allocation> lfInfo);
+ /**
+ * Set loop filter masks.
+ *
+ * @param[in] lfMasks The Allocation that contains the masks for each 64x64
+ * super block within the loop filter domain
+ */
+ void setLoopFilterMasks(sp<Allocation> lfMasks);
+
+ /**
+ * Apply loop filter on the frame.
+ *
+ * @param[in] frameBuffer The Allocation that contains the frame
+ */
+ void forEach(sp<Allocation> frameBuffer);
+};
+
+#undef DECLARE_ALIGNED
+
+#undef TX_SIZES
+#undef SIMD_WIDTH
+#undef MAX_LOOP_FILTER
+#undef MAX_SEGMENTS
+#undef MAX_REF_FRAMES
+#undef MAX_MODE_LF_DELTAS
+#undef MB_MODE_COUNT
+
+/**
* Sampler object that defines how Allocations can be read as textures
* within a kernel. Samplers are used in conjunction with the rsSample
* runtime function to return values from normalized coordinates.
diff --git a/cpu_ref/Android.mk b/cpu_ref/Android.mk
index c4e0ebf..970607a 100644
--- a/cpu_ref/Android.mk
+++ b/cpu_ref/Android.mk
@@ -30,6 +30,7 @@
rsCpuIntrinsicConvolve5x5.cpp \
rsCpuIntrinsicHistogram.cpp \
rsCpuIntrinsicInterPred.cpp \
+ rsCpuIntrinsicLoopFilter.cpp \
rsCpuIntrinsicLUT.cpp \
rsCpuIntrinsicYuvToRGB.cpp \
convolve/convolve.c
@@ -56,7 +57,10 @@
convolve/convolve_avg_neon.s \
convolve/convolve8_neon.s \
convolve/convolve8_avg_neon.s \
- convolve/convolve_neon.c
+ convolve/convolve_neon.c \
+ vp9_loopfilter_16_neon.S \
+ vp9_loopfilter_neon.S \
+ vp9_mb_lpf_neon.S
LOCAL_ASFLAGS_arm := -mfpu=neon
endif
@@ -65,6 +69,7 @@
LOCAL_C_INCLUDES += frameworks/compile/libbcc/include
LOCAL_C_INCLUDES += frameworks/rs
+LOCAL_C_INCLUDES += system/core/include
LOCAL_CFLAGS += $(rs_base_CFLAGS)
diff --git a/cpu_ref/rsCpuCore.cpp b/cpu_ref/rsCpuCore.cpp
index 7dfb3e2..a1ed1c9 100644
--- a/cpu_ref/rsCpuCore.cpp
+++ b/cpu_ref/rsCpuCore.cpp
@@ -524,6 +524,8 @@
const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Histogram(RsdCpuReferenceImpl *ctx,
const Script *s, const Element *e);
+extern RsdCpuScriptImpl * rsdIntrinsic_LoopFilter(RsdCpuReferenceImpl *ctx,
+ const Script *s, const Element *e);
RsdCpuReference::CpuScript * RsdCpuReferenceImpl::createIntrinsic(const Script *s,
RsScriptIntrinsicID iid, Element *e) {
@@ -562,6 +564,11 @@
case RS_SCRIPT_INTRINSIC_ID_HISTOGRAM:
i = rsdIntrinsic_Histogram(this, s, e);
break;
+#ifndef RS_COMPATIBILITY_LIB
+ case RS_SCRIPT_INTRINSIC_ID_LOOP_FILTER:
+ i = rsdIntrinsic_LoopFilter(this, s, e);
+ break;
+#endif
default:
rsAssert(0);
diff --git a/cpu_ref/rsCpuIntrinsicLoopFilter.cpp b/cpu_ref/rsCpuIntrinsicLoopFilter.cpp
new file mode 100644
index 0000000..d177b3c
--- /dev/null
+++ b/cpu_ref/rsCpuIntrinsicLoopFilter.cpp
@@ -0,0 +1,1229 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rsCpuIntrinsic.h"
+#include "rsCpuIntrinsicInlines.h"
+#include <sys/syscall.h>
+#include "cutils/atomic.h"
+
+#ifdef RS_COMPATIBILITY_LIB
+#include "rsCompatibilityLib.h"
+#endif
+
+#ifndef RS_COMPATIBILITY_LIB
+#include "hardware/gralloc.h"
+#endif
+
+
+#define INLINE inline
+
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+
+#define ROUND_POWER_OF_TWO(value, n) \
+ (((value) + (1 << ((n) - 1))) >> (n))
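+
+// e.g. ROUND_POWER_OF_TWO(14, 2) == (14 + 2) >> 2 == 4, i.e. 14/4 rounded
+// to the nearest integer.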
+
+
+#define MI_SIZE_LOG2 3
+#define MI_BLOCK_SIZE_LOG2 (6 - MI_SIZE_LOG2) // 64 = 2^6
+
+#define MI_SIZE (1 << MI_SIZE_LOG2) // pixels per mi-unit
+#define MI_BLOCK_SIZE (1 << MI_BLOCK_SIZE_LOG2) // mi-units per max block
+
+#define MI_MASK (MI_BLOCK_SIZE - 1)
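+
+// For example, a 1920x1080 frame spans mi_cols = 1920 / MI_SIZE = 240 and
+// mi_rows = 1080 / MI_SIZE = 135 mode-info units, grouped into 64x64-pixel
+// superblocks measuring MI_BLOCK_SIZE (8) mi-units on a side.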
+
+#define SIMD_WIDTH 16
+#define MAX_LOOP_FILTER 63
+#define MAX_SEGMENTS 8
+#define MAX_REF_FRAMES 4
+#define MAX_MODE_LF_DELTAS 2
+#define MB_MODE_COUNT 14
+#define BLOCK_SIZES 13
+
+
+#if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C)
+#define DECLARE_ALIGNED(n,typ,val) typ val __attribute__ ((aligned (n)))
+#elif defined(_MSC_VER)
+#define DECLARE_ALIGNED(n,typ,val) __declspec(align(n)) typ val
+#else
+#warning No alignment directives known for this compiler.
+#define DECLARE_ALIGNED(n,typ,val) typ val
+#endif
+
+// block transform size
+typedef enum {
+ TX_4X4 = 0, // 4x4 transform
+ TX_8X8 = 1, // 8x8 transform
+ TX_16X16 = 2, // 16x16 transform
+ TX_32X32 = 3, // 32x32 transform
+ TX_SIZES
+} TX_SIZE;
+
+typedef enum {
+ PLANE_TYPE_Y_WITH_DC,
+ PLANE_TYPE_UV,
+} PLANE_TYPE;
+
+// This structure holds bit masks for all 8x8 blocks in a 64x64 region.
+// Each 1 bit represents a position in which we want to apply the loop filter.
+// Left_ entries refer to whether we apply a filter on the border to the
+// left of the block. Above_ entries refer to whether or not to apply a
+// filter on the above border. Int_ entries refer to whether or not to
+// apply filters on the 4x4 edges within the 8x8 block that each bit
+// represents.
+// Since each transform is accompanied by a potentially different type of
+// loop filter there is a different entry in the array for each transform size.
+struct LoopFilterMask {
+ uint64_t left_y[4];
+ uint64_t above_y[4];
+ uint64_t int_4x4_y;
+ unsigned short left_uv[4];
+ unsigned short above_uv[4];
+ unsigned short int_4x4_uv;
+ unsigned char lfl_y[64];
+ unsigned char lfl_uv[16];
+};
+
+// Need to align this structure so when it is declared and
+// passed it can be loaded into vector registers.
+struct LoopFilterThresh {
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, mblim[SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, lim[SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, hev_thr[SIMD_WIDTH]);
+};
+
+struct LoopFilterInfoN {
+ LoopFilterThresh lfthr[MAX_LOOP_FILTER + 1];
+ uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS];
+ uint8_t mode_lf_lut[MB_MODE_COUNT];
+};
+
+struct BufferInfo {
+ int y_offset;
+ int u_offset;
+ int v_offset;
+ int y_stride;
+ int uv_stride;
+};
+
+#define MAX_CPU_CORES 32
+#define MAX_MB_PLANE 3
+#define MAX_SB_COL 32
+
+struct LoopFilterProgressChart {
+ int start;
+ int stop;
+ int num_planes;
+ int mi_rows;
+ int mi_cols;
+ BufferInfo buf_info;
+ uint8_t *buffer_alloc;
+ LoopFilterInfoN *lf_info;
+ LoopFilterMask *lfms;
+
+ int wid;
+ int quit;
+ int doing;
+ volatile int32_t chart[MAX_SB_COL];
+ int32_t sb_row_pro;
+ pthread_t *tid;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *start_cond;
+ pthread_mutex_t *hmutex;
+ pthread_cond_t *finish;
+};
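+
+// A sketch of how the chart coordinates workers (the protocol implied by
+// vp9_loop_filter_rows_work_proc below): chart[i + 1] counts superblock
+// columns finished in superblock row i, and chart[0] is seeded with a huge
+// value so the first row never waits. Because filtering a row touches
+// pixels shared with the row above, a worker on row i spins until row
+// i - 1 is at least two columns ahead:
+//
+//     while (chart[i + 1] + 2 > chart[i]) usleep(1);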
+
+using namespace android;
+using namespace android::renderscript;
+
+namespace android {
+namespace renderscript {
+
+
+class RsdCpuScriptIntrinsicLoopFilter : public RsdCpuScriptIntrinsic {
+private:
+ LoopFilterProgressChart mPrch;
+ int mWorkerCount;
+
+public:
+ virtual void populateScript(Script *);
+ virtual void setGlobalVar(uint32_t slot, const void *data, size_t dataLength);
+ virtual void setGlobalObj(uint32_t slot, ObjectBase *data);
+
+ virtual ~RsdCpuScriptIntrinsicLoopFilter();
+ RsdCpuScriptIntrinsicLoopFilter(RsdCpuReferenceImpl *ctx, const Script *s,
+ const Element *e);
+
+protected:
+ ObjectBaseRef<Allocation> mLfInfo;
+ ObjectBaseRef<Allocation> mLfMasks;
+ ObjectBaseRef<Allocation> mFrameBuffer;
+
+ void doLoopFilter();
+ static void kernel(const RsForEachStubParamStruct *p,
+ uint32_t xstart, uint32_t xend,
+ uint32_t instep, uint32_t outstep);
+};
+
+}
+}
+
+void RsdCpuScriptIntrinsicLoopFilter::kernel(const RsForEachStubParamStruct *p,
+ uint32_t xstart, uint32_t xend,
+ uint32_t instep, uint32_t outstep) {
+ RsdCpuScriptIntrinsicLoopFilter *cp = (RsdCpuScriptIntrinsicLoopFilter*)p->usr;
+ memset((void*)&cp->mPrch.chart, 0, sizeof(cp->mPrch.chart));
+ cp->mPrch.chart[0] = 0x0fffffff;
+ cp->mPrch.sb_row_pro = 0;
+ cp->mPrch.doing = cp->mWorkerCount;
+
+ int i = 0;
+ for (i = 0; i < cp->mWorkerCount; ++i) {
+ pthread_cond_signal(&cp->mPrch.start_cond[i]);
+ }
+ pthread_mutex_lock(cp->mPrch.hmutex);
+ if (cp->mPrch.doing) {
+ pthread_cond_wait(cp->mPrch.finish, cp->mPrch.hmutex);
+ }
+ pthread_mutex_unlock(cp->mPrch.hmutex);
+}
+
+
+void RsdCpuScriptIntrinsicLoopFilter::setGlobalVar(uint32_t slot,
+ const void *data,
+ size_t dataLength) {
+ rsAssert(slot >= 0 && slot < 2);
+ const int *dptr = (const int *)data;
+ switch (slot) {
+ case 0:
+ rsAssert(dataLength == sizeof(int) * 5);
+ mPrch.start = dptr[0];
+ mPrch.stop = dptr[1];
+ mPrch.num_planes = dptr[2];
+ mPrch.mi_rows = dptr[3];
+ mPrch.mi_cols = dptr[4];
+ break;
+ case 1:
+ rsAssert(dataLength == sizeof(BufferInfo));
+ mPrch.buf_info = *((BufferInfo*)data);
+ break;
+ default:
+ ALOGE("Non-exist global value slot: %d", slot);
+ rsAssert(0);
+ }
+}
+
+void RsdCpuScriptIntrinsicLoopFilter::setGlobalObj(uint32_t slot, ObjectBase *data) {
+ rsAssert(slot > 1 && slot < 5);
+ if (slot == 2) {
+ mLfInfo.set(static_cast<Allocation *>(data));
+ mPrch.lf_info = (LoopFilterInfoN *)mLfInfo->mHal.state.userProvidedPtr;
+ } else if (slot == 3) {
+ mLfMasks.set(static_cast<Allocation *>(data));
+ mPrch.lfms = (LoopFilterMask *)mLfMasks->mHal.state.userProvidedPtr;
+ } else {
+ mFrameBuffer.set(static_cast<Allocation *>(data));
+ mPrch.buffer_alloc = (uint8_t *)mFrameBuffer->mHal.state.userProvidedPtr;
+ }
+}
+
+RsdCpuScriptIntrinsicLoopFilter::~RsdCpuScriptIntrinsicLoopFilter() {
+ android_atomic_inc(&mPrch.quit);
+ int i = 0;
+ for (i = 0; i < mWorkerCount; ++i) {
+ pthread_cond_signal(&mPrch.start_cond[i]);
+ }
+ for (i = 0; i < mWorkerCount; ++i) {
+ pthread_join(mPrch.tid[i], NULL);
+ }
+ free(mPrch.tid);
+}
+
+void RsdCpuScriptIntrinsicLoopFilter::populateScript(Script *s) {
+ s->mHal.info.exportedVariableCount = 9;
+ s->mHal.info.exportedFunctionCount = 1;
+}
+
+RsdCpuScriptImpl * rsdIntrinsic_LoopFilter(RsdCpuReferenceImpl *ctx,
+ const Script *s, const Element *e) {
+ return new RsdCpuScriptIntrinsicLoopFilter(ctx, s, e);
+}
+
+extern "C" void vp9_lpf_vertical_16_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh);
+extern "C" void vp9_lpf_vertical_16_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh);
+extern "C" void vp9_lpf_vertical_16_dual_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh);
+extern "C" void vp9_lpf_vertical_16_dual_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh);
+extern "C" void vp9_lpf_vertical_8_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count);
+extern "C" void vp9_lpf_vertical_8_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_vertical_8_dual_c(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_vertical_8_dual_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count);
+extern "C" void vp9_lpf_vertical_4_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_vertical_4_dual_c(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_vertical_4_dual_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_horizontal_16_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_horizontal_16_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_horizontal_8_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_horizontal_8_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_horizontal_8_dual_c(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_horizontal_8_dual_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_horizontal_4_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_horizontal_4_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int count);
+extern "C" void vp9_lpf_horizontal_4_dual_c(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+extern "C" void vp9_lpf_horizontal_4_dual_neon(uint8_t *s, int pitch,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1);
+
+
+#ifdef ARCH_ARM_HAVE_NEON
+#define vp9_lpf_vertical_16 vp9_lpf_vertical_16_neon
+#define vp9_lpf_vertical_16_dual vp9_lpf_vertical_16_dual_neon
+#define vp9_lpf_vertical_8 vp9_lpf_vertical_8_neon
+#define vp9_lpf_vertical_8_dual vp9_lpf_vertical_8_dual_neon
+#define vp9_lpf_vertical_4 vp9_lpf_vertical_4_neon
+#define vp9_lpf_vertical_4_dual vp9_lpf_vertical_4_dual_neon
+#define vp9_lpf_horizontal_16 vp9_lpf_horizontal_16_neon
+#define vp9_lpf_horizontal_8 vp9_lpf_horizontal_8_neon
+#define vp9_lpf_horizontal_8_dual vp9_lpf_horizontal_8_dual_neon
+#define vp9_lpf_horizontal_4 vp9_lpf_horizontal_4_neon
+#define vp9_lpf_horizontal_4_dual vp9_lpf_horizontal_4_dual_neon
+#else
+#define vp9_lpf_vertical_16 vp9_lpf_vertical_16_c
+#define vp9_lpf_vertical_16_dual vp9_lpf_vertical_16_dual_c
+#define vp9_lpf_vertical_8 vp9_lpf_vertical_8_c
+#define vp9_lpf_vertical_8_dual vp9_lpf_vertical_8_dual_c
+#define vp9_lpf_vertical_4 vp9_lpf_vertical_4_c
+#define vp9_lpf_vertical_4_dual vp9_lpf_vertical_4_dual_c
+#define vp9_lpf_horizontal_16 vp9_lpf_horizontal_16_c
+#define vp9_lpf_horizontal_8 vp9_lpf_horizontal_8_c
+#define vp9_lpf_horizontal_8_dual vp9_lpf_horizontal_8_dual_c
+#define vp9_lpf_horizontal_4 vp9_lpf_horizontal_4_c
+#define vp9_lpf_horizontal_4_dual vp9_lpf_horizontal_4_dual_c
+#endif
+
+
+void vp9_lpf_horizontal_8_dual_neon(uint8_t *s, int p /* pitch */,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_horizontal_8(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_horizontal_8(s + 8, p, blimit1, limit1, thresh1, 1);
+}
+
+void vp9_lpf_vertical_4_dual_neon(uint8_t *s, int p,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
+}
+
+void vp9_lpf_vertical_8_dual_neon(uint8_t *s, int p,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
+}
+
+void vp9_lpf_vertical_16_dual_neon(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh) {
+ vp9_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
+ vp9_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
+}
+
+
+static INLINE int8_t signed_char_clamp(int t) {
+ return (int8_t)clamp(t, -128, 127);
+}
+
+// should we apply any filter at all: 11111111 yes, 00000000 no
+static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
+ uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1,
+ uint8_t q2, uint8_t q3) {
+ int8_t mask = 0;
+ mask |= (abs(p3 - p2) > limit) * -1;
+ mask |= (abs(p2 - p1) > limit) * -1;
+ mask |= (abs(p1 - p0) > limit) * -1;
+ mask |= (abs(q1 - q0) > limit) * -1;
+ mask |= (abs(q2 - q1) > limit) * -1;
+ mask |= (abs(q3 - q2) > limit) * -1;
+ mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ return ~mask;
+}
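+
+// Worked example (values are illustrative): with limit = 16, blimit = 32
+// and p3..q3 = {80, 80, 81, 82, 90, 91, 91, 92}, every neighbour
+// difference is within limit and |p0 - q0| * 2 + |p1 - q1| / 2 =
+// 16 + 5 = 21 <= blimit, so no -1 terms are OR'ed in and the function
+// returns ~0 == 0xff: apply the filter.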
+
+static INLINE int8_t flat_mask4(uint8_t thresh,
+ uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1,
+ uint8_t q2, uint8_t q3) {
+ int8_t mask = 0;
+ mask |= (abs(p1 - p0) > thresh) * -1;
+ mask |= (abs(q1 - q0) > thresh) * -1;
+ mask |= (abs(p2 - p0) > thresh) * -1;
+ mask |= (abs(q2 - q0) > thresh) * -1;
+ mask |= (abs(p3 - p0) > thresh) * -1;
+ mask |= (abs(q3 - q0) > thresh) * -1;
+ return ~mask;
+}
+
+static INLINE int8_t flat_mask5(uint8_t thresh,
+ uint8_t p4, uint8_t p3,
+ uint8_t p2, uint8_t p1,
+ uint8_t p0, uint8_t q0,
+ uint8_t q1, uint8_t q2,
+ uint8_t q3, uint8_t q4) {
+ int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);
+ mask |= (abs(p4 - p0) > thresh) * -1;
+ mask |= (abs(q4 - q0) > thresh) * -1;
+ return ~mask;
+}
+
+// is there high edge variance at the internal edge: 11111111 yes, 00000000 no
+static INLINE int8_t hev_mask(uint8_t thresh, uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1) {
+ int8_t hev = 0;
+ hev |= (abs(p1 - p0) > thresh) * -1;
+ hev |= (abs(q1 - q0) > thresh) * -1;
+ return hev;
+}
+
+static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1,
+ uint8_t *op0, uint8_t *oq0, uint8_t *oq1) {
+ int8_t filter1, filter2;
+
+ const int8_t ps1 = (int8_t) *op1 ^ 0x80;
+ const int8_t ps0 = (int8_t) *op0 ^ 0x80;
+ const int8_t qs0 = (int8_t) *oq0 ^ 0x80;
+ const int8_t qs1 = (int8_t) *oq1 ^ 0x80;
+ const uint8_t hev = hev_mask(thresh, *op1, *op0, *oq0, *oq1);
+
+ // add outer taps if we have high edge variance
+ int8_t filter = signed_char_clamp(ps1 - qs1) & hev;
+
+ // inner taps
+ filter = signed_char_clamp(filter + 3 * (qs0 - ps0)) & mask;
+
+ // save bottom 3 bits so that we round one side +4 and the other +3
+ // if it equals 4 we'll set to adjust by -1 to account for the fact
+ // we'd round 3 the other way
+ filter1 = signed_char_clamp(filter + 4) >> 3;
+ filter2 = signed_char_clamp(filter + 3) >> 3;
+
+ *oq0 = signed_char_clamp(qs0 - filter1) ^ 0x80;
+ *op0 = signed_char_clamp(ps0 + filter2) ^ 0x80;
+
+ // outer tap adjustments
+ filter = ROUND_POWER_OF_TWO(filter1, 1) & ~hev;
+
+ *oq1 = signed_char_clamp(qs1 - filter) ^ 0x80;
+ *op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
+}
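+
+// Numeric example of the +4/+3 rounding above (illustrative): for
+// filter = 4, filter1 = (4 + 4) >> 3 = 1 but filter2 = (4 + 3) >> 3 = 0,
+// so only the q side moves -- exactly the one-sided adjustment the
+// comment in filter4 describes.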
+
+void vp9_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
+ const uint8_t *blimit, const uint8_t *limit,
+ const uint8_t *thresh, int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p);
+ ++s;
+ }
+}
+
+void vp9_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+ const uint8_t *limit0, const uint8_t *thresh0,
+ const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1);
+}
+
+void vp9_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ filter4(mask, *thresh, s - 2, s - 1, s, s + 1);
+ s += pitch;
+ }
+}
+
+void vp9_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
+ const uint8_t *limit0, const uint8_t *thresh0,
+ const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1);
+ vp9_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1);
+}
+
+static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
+ uint8_t *op3, uint8_t *op2,
+ uint8_t *op1, uint8_t *op0,
+ uint8_t *oq0, uint8_t *oq1,
+ uint8_t *oq2, uint8_t *oq3) {
+ if (flat && mask) {
+ const uint8_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+ const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
+
+ // 7-tap filter [1, 1, 1, 2, 1, 1, 1]
+ *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0, 3);
+ *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1, 3);
+ *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2, 3);
+ *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3, 3);
+ *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3);
+ *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3);
+ } else {
+ filter4(mask, thresh, op1, op0, oq0, oq1);
+ }
+}
+
+void vp9_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
+ s, s + 1 * p, s + 2 * p, s + 3 * p);
+ ++s;
+ }
+}
+
+void vp9_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+ const uint8_t *limit0, const uint8_t *thresh0,
+ const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1);
+}
+
+void vp9_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ int i;
+
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1,
+ s, s + 1, s + 2, s + 3);
+ s += pitch;
+ }
+}
+
+void vp9_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
+ const uint8_t *limit0, const uint8_t *thresh0,
+ const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1);
+ vp9_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1);
+}
+
+static INLINE void filter16(int8_t mask, uint8_t thresh,
+ uint8_t flat, uint8_t flat2,
+ uint8_t *op7, uint8_t *op6,
+ uint8_t *op5, uint8_t *op4,
+ uint8_t *op3, uint8_t *op2,
+ uint8_t *op1, uint8_t *op0,
+ uint8_t *oq0, uint8_t *oq1,
+ uint8_t *oq2, uint8_t *oq3,
+ uint8_t *oq4, uint8_t *oq5,
+ uint8_t *oq6, uint8_t *oq7) {
+ if (flat2 && flat && mask) {
+ const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4,
+ p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+
+ const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3,
+ q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
+
+ // 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
+ *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 +
+ q0, 4);
+ *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 +
+ q0 + q1, 4);
+ *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 +
+ q0 + q1 + q2, 4);
+ *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 +
+ q0 + q1 + q2 + q3, 4);
+ *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 +
+ q0 + q1 + q2 + q3 + q4, 4);
+ *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 +
+ q0 + q1 + q2 + q3 + q4 + q5, 4);
+ *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6, 4);
+ *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 +
+ q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4);
+ *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 +
+ q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4);
+ *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 +
+ q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4);
+ *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 +
+ q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
+ *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 +
+ q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
+ *oq5 = ROUND_POWER_OF_TWO(p1 + p0 +
+ q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
+ *oq6 = ROUND_POWER_OF_TWO(p0 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
+ } else {
+ filter8(mask, thresh, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3);
+ }
+}
+
+void vp9_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat2 = flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0,
+ q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p]);
+
+ filter16(mask, *thresh, flat, flat2,
+ s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p,
+ s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
+ s, s + 1 * p, s + 2 * p, s + 3 * p,
+ s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p);
+ ++s;
+ }
+}
+
+static void mb_lpf_vertical_edge_w(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0,
+ q0, s[4], s[5], s[6], s[7]);
+
+ filter16(mask, *thresh, flat, flat2,
+ s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1,
+ s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7);
+ s += p;
+ }
+}
+
+void vp9_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8);
+}
+
+void vp9_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16);
+}
+
+
+static void filter_selectively_vert_row2(PLANE_TYPE plane_type,
+ uint8_t *s, int pitch,
+ unsigned int mask_16x16_l,
+ unsigned int mask_8x8_l,
+ unsigned int mask_4x4_l,
+ unsigned int mask_4x4_int_l,
+ const LoopFilterInfoN *lfi_n,
+ const uint8_t *lfl) {
+ const int mask_shift = plane_type ? 4 : 8;
+ const int mask_cutoff = plane_type ? 0xf : 0xff;
+ const int lfl_forward = plane_type ? 4 : 8;
+
+ unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
+ unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
+ unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
+ unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
+ unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
+ unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
+ unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
+ unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
+ unsigned int mask;
+
+ for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
+ mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
+ mask; mask >>= 1) {
+ const LoopFilterThresh *lfi0 = lfi_n->lfthr + *lfl;
+ const LoopFilterThresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);
+
+ // TODO(yunqingwang): count in loopfilter functions should be removed.
+ if (mask & 1) {
+ if ((mask_16x16_0 | mask_16x16_1) & 1) {
+ if ((mask_16x16_0 & mask_16x16_1) & 1) {
+ vp9_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr);
+ } else if (mask_16x16_0 & 1) {
+ vp9_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr);
+ } else {
+ vp9_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim,
+ lfi1->lim, lfi1->hev_thr);
+ }
+ }
+
+ if ((mask_8x8_0 | mask_8x8_1) & 1) {
+ if ((mask_8x8_0 & mask_8x8_1) & 1) {
+ vp9_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr, lfi1->mblim, lfi1->lim,
+ lfi1->hev_thr);
+ } else if (mask_8x8_0 & 1) {
+ vp9_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr, 1);
+ } else {
+ vp9_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+ lfi1->hev_thr, 1);
+ }
+ }
+
+ if ((mask_4x4_0 | mask_4x4_1) & 1) {
+ if ((mask_4x4_0 & mask_4x4_1) & 1) {
+ vp9_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr, lfi1->mblim, lfi1->lim,
+ lfi1->hev_thr);
+ } else if (mask_4x4_0 & 1) {
+ vp9_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr, 1);
+ } else {
+ vp9_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+ lfi1->hev_thr, 1);
+ }
+ }
+
+ if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
+ if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
+ vp9_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr, lfi1->mblim, lfi1->lim,
+ lfi1->hev_thr);
+ } else if (mask_4x4_int_0 & 1) {
+ vp9_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
+ lfi0->hev_thr, 1);
+ } else {
+ vp9_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
+ lfi1->lim, lfi1->hev_thr, 1);
+ }
+ }
+ }
+
+ s += 8;
+ lfl += 1;
+ mask_16x16_0 >>= 1;
+ mask_8x8_0 >>= 1;
+ mask_4x4_0 >>= 1;
+ mask_4x4_int_0 >>= 1;
+ mask_16x16_1 >>= 1;
+ mask_8x8_1 >>= 1;
+ mask_4x4_1 >>= 1;
+ mask_4x4_int_1 >>= 1;
+ }
+}
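+
+// Mask packing used above: each y row of a 64x64 block contributes 8 bits
+// (one per 8x8 column), so a two-row slice is 16 bits with the second row
+// at mask >> 8; each uv row contributes 4 bits with the second row at
+// mask >> 4 -- hence mask_shift = plane_type ? 4 : 8 and
+// mask_cutoff = plane_type ? 0xf : 0xff.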
+
+static void filter_selectively_horiz(uint8_t *s, int pitch,
+ unsigned int mask_16x16,
+ unsigned int mask_8x8,
+ unsigned int mask_4x4,
+ unsigned int mask_4x4_int,
+ const LoopFilterInfoN *lfi_n,
+ const uint8_t *lfl) {
+ unsigned int mask;
+ int count;
+
+ for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
+ mask; mask >>= count) {
+ const LoopFilterThresh *lfi = lfi_n->lfthr + *lfl;
+
+ count = 1;
+ if (mask & 1) {
+ if (mask_16x16 & 1) {
+ if ((mask_16x16 & 3) == 3) {
+ vp9_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 2);
+ count = 2;
+ } else {
+ vp9_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+ } else if (mask_8x8 & 1) {
+ if ((mask_8x8 & 3) == 3) {
+ // Next block's thresholds
+ const LoopFilterThresh *lfin = lfi_n->lfthr + *(lfl + 1);
+
+ vp9_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, lfin->mblim, lfin->lim,
+ lfin->hev_thr);
+
+ if ((mask_4x4_int & 3) == 3) {
+ vp9_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, lfin->mblim,
+ lfin->lim, lfin->hev_thr);
+ } else {
+ if (mask_4x4_int & 1)
+ vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+ else if (mask_4x4_int & 2)
+ vp9_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+ lfin->lim, lfin->hev_thr, 1);
+ }
+ count = 2;
+ } else {
+ vp9_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+
+ if (mask_4x4_int & 1)
+ vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+ }
+ } else if (mask_4x4 & 1) {
+ if ((mask_4x4 & 3) == 3) {
+ // Next block's thresholds
+ const LoopFilterThresh *lfin = lfi_n->lfthr + *(lfl + 1);
+
+ vp9_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, lfin->mblim, lfin->lim,
+ lfin->hev_thr);
+ if ((mask_4x4_int & 3) == 3) {
+ vp9_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, lfin->mblim,
+ lfin->lim, lfin->hev_thr);
+ } else {
+ if (mask_4x4_int & 1)
+ vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+ else if (mask_4x4_int & 2)
+ vp9_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
+ lfin->lim, lfin->hev_thr, 1);
+ }
+ count = 2;
+ } else {
+ vp9_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+
+ if (mask_4x4_int & 1)
+ vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+ } else if (mask_4x4_int & 1) {
+ vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+ }
+ s += 8 * count;
+ lfl += count;
+ mask_16x16 >>= count;
+ mask_8x8 >>= count;
+ mask_4x4 >>= count;
+ mask_4x4_int >>= count;
+ }
+}
+
+static void filter_block_plane_y(LoopFilterInfoN *lf_info,
+ LoopFilterMask *lfm,
+ int stride,
+ uint8_t *buf,
+ int mi_rows,
+ int mi_row) {
+ uint8_t* dst0 = buf;
+ int r;
+
+ uint64_t mask_16x16 = lfm->left_y[TX_16X16];
+ uint64_t mask_8x8 = lfm->left_y[TX_8X8];
+ uint64_t mask_4x4 = lfm->left_y[TX_4X4];
+ uint64_t mask_4x4_int = lfm->int_4x4_y;
+
+ // Vertical pass: do 2 rows at one time
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < mi_rows; r += 2) {
+ unsigned int mask_16x16_l = mask_16x16 & 0xffff;
+ unsigned int mask_8x8_l = mask_8x8 & 0xffff;
+ unsigned int mask_4x4_l = mask_4x4 & 0xffff;
+ unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;
+
+ // Disable filtering on the leftmost column
+ filter_selectively_vert_row2(PLANE_TYPE_Y_WITH_DC, buf, stride,
+ mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, lf_info,
+ &lfm->lfl_y[r << 3]);
+
+ buf += 16 * stride;
+ mask_16x16 >>= 16;
+ mask_8x8 >>= 16;
+ mask_4x4 >>= 16;
+ mask_4x4_int >>= 16;
+ }
+
+ // Horizontal pass
+ buf = dst0;
+ mask_16x16 = lfm->above_y[TX_16X16];
+ mask_8x8 = lfm->above_y[TX_8X8];
+ mask_4x4 = lfm->above_y[TX_4X4];
+ mask_4x4_int = lfm->int_4x4_y;
+
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < mi_rows; r++) {
+ unsigned int mask_16x16_r;
+ unsigned int mask_8x8_r;
+ unsigned int mask_4x4_r;
+
+ if (mi_row + r == 0) {
+ mask_16x16_r = 0;
+ mask_8x8_r = 0;
+ mask_4x4_r = 0;
+ } else {
+ mask_16x16_r = mask_16x16 & 0xff;
+ mask_8x8_r = mask_8x8 & 0xff;
+ mask_4x4_r = mask_4x4 & 0xff;
+ }
+
+ filter_selectively_horiz(buf, stride, mask_16x16_r, mask_8x8_r,
+ mask_4x4_r, mask_4x4_int & 0xff, lf_info, &lfm->lfl_y[r << 3]);
+
+ buf += 8 * stride;
+ mask_16x16 >>= 8;
+ mask_8x8 >>= 8;
+ mask_4x4 >>= 8;
+ mask_4x4_int >>= 8;
+ }
+}
+
+static void filter_block_plane_uv(LoopFilterInfoN *lf_info,
+ LoopFilterMask *lfm,
+ int stride,
+ uint8_t *buf,
+ int mi_rows,
+ int mi_row) {
+ uint8_t* dst0 = buf;
+ int r, c;
+
+ uint16_t mask_16x16 = lfm->left_uv[TX_16X16];
+ uint16_t mask_8x8 = lfm->left_uv[TX_8X8];
+ uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
+ uint16_t mask_4x4_int = lfm->int_4x4_uv;
+
+ // Vertical pass: do 2 rows at one time
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < mi_rows; r += 4) {
+
+ for (c = 0; c < (MI_BLOCK_SIZE >> 1); c++) {
+ lfm->lfl_uv[(r << 1) + c] = lfm->lfl_y[(r << 3) + (c << 1)];
+ lfm->lfl_uv[((r + 2) << 1) + c] = lfm->lfl_y[((r + 2) << 3) + (c << 1)];
+ }
+
+ {
+ unsigned int mask_16x16_l = mask_16x16 & 0xff;
+ unsigned int mask_8x8_l = mask_8x8 & 0xff;
+ unsigned int mask_4x4_l = mask_4x4 & 0xff;
+ unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;
+
+ // Disable filtering on the leftmost column
+ filter_selectively_vert_row2(PLANE_TYPE_UV, buf, stride,
+ mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l,
+ lf_info, &lfm->lfl_uv[r << 1]);
+
+ buf += 16 * stride;
+ mask_16x16 >>= 8;
+ mask_8x8 >>= 8;
+ mask_4x4 >>= 8;
+ mask_4x4_int >>= 8;
+ }
+ }
+
+ // Horizontal pass
+ buf = dst0;
+ mask_16x16 = lfm->above_uv[TX_16X16];
+ mask_8x8 = lfm->above_uv[TX_8X8];
+ mask_4x4 = lfm->above_uv[TX_4X4];
+ mask_4x4_int = lfm->int_4x4_uv;
+
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < mi_rows; r += 2) {
+ int skip_border_4x4_r = mi_row + r == mi_rows - 1;
+ unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : (mask_4x4_int & 0xf);
+ unsigned int mask_16x16_r;
+ unsigned int mask_8x8_r;
+ unsigned int mask_4x4_r;
+
+ if (mi_row + r == 0) {
+ mask_16x16_r = 0;
+ mask_8x8_r = 0;
+ mask_4x4_r = 0;
+ } else {
+ mask_16x16_r = mask_16x16 & 0xf;
+ mask_8x8_r = mask_8x8 & 0xf;
+ mask_4x4_r = mask_4x4 & 0xf;
+ }
+
+ filter_selectively_horiz(buf, stride, mask_16x16_r, mask_8x8_r,
+ mask_4x4_r, mask_4x4_int_r, lf_info, &lfm->lfl_uv[r << 1]);
+
+ buf += 8 * stride;
+ mask_16x16 >>= 4;
+ mask_8x8 >>= 4;
+ mask_4x4 >>= 4;
+ mask_4x4_int >>= 4;
+ }
+}
+
+static void *vp9_loop_filter_rows_work_proc(void *data) {
+ LoopFilterProgressChart *param = (LoopFilterProgressChart *)data;
+ int wid = android_atomic_inc(&param->wid);
+ int sb_row;
+ int mi_row, mi_col;
+ int lfm_idx;
+ uint8_t *buf_start[MAX_MB_PLANE];
+ uint8_t *buf[MAX_MB_PLANE];
+ BufferInfo *buf_info = &param->buf_info;
+
+ while (!android_atomic_release_load(&param->quit)) {
+ pthread_mutex_lock(&param->mutex[wid]);
+ pthread_cond_wait(&param->start_cond[wid], &param->mutex[wid]);
+ pthread_mutex_unlock(&param->mutex[wid]);
+
+ if (android_atomic_release_load(&param->quit)) return NULL;
+
+ buf_start[0] = param->buffer_alloc + buf_info->y_offset;
+ buf_start[1] = param->buffer_alloc + buf_info->u_offset;
+ buf_start[2] = param->buffer_alloc + buf_info->v_offset;
+ sb_row = android_atomic_inc(&param->sb_row_pro);
+ mi_row = (sb_row * MI_BLOCK_SIZE) + param->start;
+
+ while (mi_row < param->stop) {
+ buf[0] = buf_start[0] + (mi_row * buf_info->y_stride << 3);
+ buf[1] = buf_start[1] + (mi_row * buf_info->uv_stride << 2);
+ buf[2] = buf_start[2] + (mi_row * buf_info->uv_stride << 2);
+ lfm_idx = sb_row * ((param->mi_cols + 7) >> 3);
+ for (mi_col = 0; mi_col < param->mi_cols; mi_col += MI_BLOCK_SIZE) {
+
+ while (param->chart[sb_row+1] + 2 > android_atomic_release_load(&param->chart[sb_row])) {
+ usleep(1);
+ }
+
+ filter_block_plane_y(param->lf_info, param->lfms + lfm_idx,
+ buf_info->y_stride, buf[0], param->mi_rows,
+ mi_row);
+ mi_col += MI_BLOCK_SIZE;
+ if (mi_col < param->mi_cols) {
+ lfm_idx++;
+ buf[0] += MI_BLOCK_SIZE * MI_BLOCK_SIZE;
+ filter_block_plane_y(param->lf_info, param->lfms + lfm_idx,
+ buf_info->y_stride, buf[0],
+ param->mi_rows, mi_row);
+ }
+ buf[0] += MI_BLOCK_SIZE * MI_BLOCK_SIZE;
+ if (param->num_planes > 1) {
+ lfm_idx--;
+ filter_block_plane_uv(param->lf_info, param->lfms + lfm_idx,
+ buf_info->uv_stride, buf[1],
+ param->mi_rows, mi_row);
+ filter_block_plane_uv(param->lf_info, param->lfms + lfm_idx,
+ buf_info->uv_stride, buf[2],
+ param->mi_rows, mi_row);
+ if (mi_col < param->mi_cols) {
+ lfm_idx++;
+ buf[1] += MI_BLOCK_SIZE * MI_BLOCK_SIZE >> 1;
+ buf[2] += MI_BLOCK_SIZE * MI_BLOCK_SIZE >> 1;
+ filter_block_plane_uv(param->lf_info,
+ param->lfms + lfm_idx,
+ buf_info->uv_stride, buf[1],
+ param->mi_rows, mi_row);
+ filter_block_plane_uv(param->lf_info,
+ param->lfms + lfm_idx,
+ buf_info->uv_stride, buf[2],
+ param->mi_rows, mi_row);
+ }
+ buf[1] += MI_BLOCK_SIZE * MI_BLOCK_SIZE >> 1;
+ buf[2] += MI_BLOCK_SIZE * MI_BLOCK_SIZE >> 1;
+ }
+ lfm_idx++;
+ android_atomic_inc(&param->chart[sb_row+1]);
+ }
+ android_atomic_inc(&param->chart[sb_row+1]);
+ sb_row = android_atomic_inc(&param->sb_row_pro);
+ mi_row = (sb_row << 3) + param->start;
+ }
+
+ pthread_mutex_lock(param->hmutex);
+ if ((--param->doing) == 0)
+ pthread_cond_signal(param->finish);
+ pthread_mutex_unlock(param->hmutex);
+ }
+
+ return NULL;
+}
+
+RsdCpuScriptIntrinsicLoopFilter::RsdCpuScriptIntrinsicLoopFilter(
+ RsdCpuReferenceImpl *ctx, const Script *s, const Element *e)
+ : RsdCpuScriptIntrinsic(ctx, s, e, RS_SCRIPT_INTRINSIC_ID_LOOP_FILTER) {
+ mRootPtr = &kernel;
+ mWorkerCount = sysconf(_SC_NPROCESSORS_ONLN);
+ mPrch.quit = 0;
+ mPrch.wid = 0;
+ mPrch.sb_row_pro = 0;
+ mPrch.doing = mWorkerCount;
+ int size = mWorkerCount * sizeof(pthread_t) +
+ mWorkerCount * sizeof(pthread_mutex_t) +
+ mWorkerCount * sizeof(pthread_cond_t) +
+ sizeof(pthread_mutex_t) + sizeof(pthread_cond_t);
+ uint8_t *ptr = (uint8_t *)malloc(size);
+ rsAssert(ptr);
+ mPrch.tid = (pthread_t *)ptr;
+ mPrch.mutex = (pthread_mutex_t *) (mPrch.tid + mWorkerCount);
+ mPrch.start_cond = (pthread_cond_t *) (mPrch.mutex + mWorkerCount);
+ mPrch.hmutex = (pthread_mutex_t *) (mPrch.start_cond + mWorkerCount);
+ mPrch.finish = (pthread_cond_t *) (mPrch.hmutex + 1);
+ int i = 0;
+ int rv = 0;
+ pthread_mutex_init(mPrch.hmutex, NULL);
+ pthread_cond_init(mPrch.finish, NULL);
+ for (i = 0; i < mWorkerCount; ++i) {
+ pthread_mutex_init(&mPrch.mutex[i], NULL);
+ pthread_cond_init(&mPrch.start_cond[i], NULL);
+ }
+ for (i = 0; i < mWorkerCount; ++i) {
+ rv = pthread_create(&mPrch.tid[i], NULL, &vp9_loop_filter_rows_work_proc, &mPrch);
+ rsAssert(rv == 0);
+ }
+}
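+
+// The single malloc in the constructor is carved into consecutive regions:
+//     [ tid[0..n) | mutex[0..n) | start_cond[0..n) | hmutex | finish ]
+// with n == mWorkerCount. Each worker sleeps on its own start_cond; the
+// host thread (see kernel() above) signals them all, then waits on finish
+// until the last worker decrements "doing".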
+
diff --git a/cpu_ref/vp9_loopfilter_16_neon.S b/cpu_ref/vp9_loopfilter_16_neon.S
new file mode 100644
index 0000000..490b9b8
--- /dev/null
+++ b/cpu_ref/vp9_loopfilter_16_neon.S
@@ -0,0 +1,220 @@
+@ This file was created from a .asm file
+@ using the ads2gas.pl script.
+ .equ DO1STROUNDING, 0
+@
+@ Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS. All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+@
+@ Copyright (c) 2014 The Android Open Source Project
+@
+@ Licensed under the Apache License, Version 2.0 (the "License");
+@ you may not use this file except in compliance with the License.
+@ You may obtain a copy of the License at
+@
+@ http://www.apache.org/licenses/LICENSE-2.0
+@
+@ Unless required by applicable law or agreed to in writing, software
+@ distributed under the License is distributed on an "AS IS" BASIS,
+@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@ See the License for the specific language governing permissions and
+@ limitations under the License.
+
+ .global vp9_lpf_horizontal_4_dual_neon
+ .type vp9_lpf_horizontal_4_dual_neon, function
+ .arm
+
+.text
+.p2align 2
+
+@void vp9_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
+@ const uint8_t *blimit0,
+@ const uint8_t *limit0,
+@ const uint8_t *thresh0,
+@ const uint8_t *blimit1,
+@ const uint8_t *limit1,
+@ const uint8_t *thresh1)
+@ r0 uint8_t *s,
+@ r1 int p,
+@ r2 const uint8_t *blimit0,
+@ r3 const uint8_t *limit0,
+@ sp const uint8_t *thresh0,
+@ sp+4 const uint8_t *blimit1,
+@ sp+8 const uint8_t *limit1,
+@ sp+12 const uint8_t *thresh1,
+
+_vp9_lpf_horizontal_4_dual_neon:
+ vp9_lpf_horizontal_4_dual_neon: @ PROC
+ push {lr}
+
+ ldr r12, [sp, #4] @ load thresh0
+ vld1.8 {d0}, [r2] @ load blimit0 to first half q
+ vld1.8 {d2}, [r3] @ load limit0 to first half q
+
+ add r1, r1, r1 @ double pitch
+ ldr r2, [sp, #8] @ load blimit1
+
+ vld1.8 {d4}, [r12] @ load thresh0 to first half q
+
+ ldr r3, [sp, #12] @ load limit1
+ ldr r12, [sp, #16] @ load thresh1
+ vld1.8 {d1}, [r2] @ load blimit1 to 2nd half q
+
+ sub r2, r0, r1, lsl #1 @ s[-4 * p]
+
+ vld1.8 {d3}, [r3] @ load limit1 to 2nd half q
+ vld1.8 {d5}, [r12] @ load thresh1 to 2nd half q
+
+ vpush {d8-d15} @ save neon registers
+
+ add r3, r2, r1, lsr #1 @ s[-3 * p]
+
+ vld1.u8 {q3}, [r2,:64], r1 @ p3
+ vld1.u8 {q4}, [r3,:64], r1 @ p2
+ vld1.u8 {q5}, [r2,:64], r1 @ p1
+ vld1.u8 {q6}, [r3,:64], r1 @ p0
+ vld1.u8 {q7}, [r2,:64], r1 @ q0
+ vld1.u8 {q8}, [r3,:64], r1 @ q1
+ vld1.u8 {q9}, [r2,:64] @ q2
+ vld1.u8 {q10}, [r3,:64] @ q3
+
+ sub r2, r2, r1, lsl #1
+ sub r3, r3, r1, lsl #1
+
+ bl vp9_loop_filter_neon_16
+
+ vst1.u8 {q5}, [r2,:64], r1 @ store op1
+ vst1.u8 {q6}, [r3,:64], r1 @ store op0
+ vst1.u8 {q7}, [r2,:64], r1 @ store oq0
+ vst1.u8 {q8}, [r3,:64], r1 @ store oq1
+
+ vpop {d8-d15} @ restore neon registers
+
+ pop {pc}
+ .size vp9_lpf_horizontal_4_dual_neon, .-vp9_lpf_horizontal_4_dual_neon @ ENDP @ |vp9_lpf_horizontal_4_dual_neon|
+
+@ void vp9_loop_filter_neon_16();
+@ This is a helper function for the loopfilters. The individual functions do the
+@ necessary load, transpose (if necessary) and store. This function uses
+@ registers d8-d15, so the calling function must save those registers.
+@
+@ r0-r3, r12 PRESERVE
+@ q0 blimit
+@ q1 limit
+@ q2 thresh
+@ q3 p3
+@ q4 p2
+@ q5 p1
+@ q6 p0
+@ q7 q0
+@ q8 q1
+@ q9 q2
+@ q10 q3
+@
+@ Outputs:
+@ q5 op1
+@ q6 op0
+@ q7 oq0
+@ q8 oq1
+_vp9_loop_filter_neon_16:
+ vp9_loop_filter_neon_16: @ PROC
+
+ @ filter_mask
+ vabd.u8 q11, q3, q4 @ m1 = abs(p3 - p2)
+ vabd.u8 q12, q4, q5 @ m2 = abs(p2 - p1)
+ vabd.u8 q13, q5, q6 @ m3 = abs(p1 - p0)
+ vabd.u8 q14, q8, q7 @ m4 = abs(q1 - q0)
+ vabd.u8 q3, q9, q8 @ m5 = abs(q2 - q1)
+ vabd.u8 q4, q10, q9 @ m6 = abs(q3 - q2)
+
+ @ only compare the largest value to limit
+ vmax.u8 q11, q11, q12 @ m7 = max(m1, m2)
+ vmax.u8 q12, q13, q14 @ m8 = max(m3, m4)
+
+ vabd.u8 q9, q6, q7 @ abs(p0 - q0)
+
+ vmax.u8 q3, q3, q4 @ m9 = max(m5, m6)
+
+ vmov.u8 q10, #0x80
+
+ vmax.u8 q15, q11, q12 @ m10 = max(m7, m8)
+
+ vcgt.u8 q13, q13, q2 @ (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 q14, q14, q2 @ (abs(q1 - q0) > thresh)*-1
+ vmax.u8 q15, q15, q3 @ m11 = max(m10, m9)
+
+ vabd.u8 q2, q5, q8 @ a = abs(p1 - q1)
+ vqadd.u8 q9, q9, q9 @ b = abs(p0 - q0) * 2
+
+ veor q7, q7, q10 @ qs0
+
+ vcge.u8 q15, q1, q15 @ abs(m11) > limit
+
+ vshr.u8 q2, q2, #1 @ a = a / 2
+ veor q6, q6, q10 @ ps0
+
+ veor q5, q5, q10 @ ps1
+ vqadd.u8 q9, q9, q2 @ a = b + a
+
+ veor q8, q8, q10 @ qs1
+
+ vmov.u16 q4, #3
+
+ vsubl.s8 q2, d14, d12 @ ( qs0 - ps0)
+ vsubl.s8 q11, d15, d13
+
+ vcge.u8 q9, q0, q9 @ a > blimit
+
+ vqsub.s8 q1, q5, q8 @ filter = clamp(ps1-qs1)
+ vorr q14, q13, q14 @ hev
+
+ vmul.i16 q2, q2, q4 @ 3 * ( qs0 - ps0)
+ vmul.i16 q11, q11, q4
+
+ vand q1, q1, q14 @ filter &= hev
+ vand q15, q15, q9 @ mask
+
+ vmov.u8 q4, #3
+
+ vaddw.s8 q2, q2, d2 @ filter + 3 * (qs0 - ps0)
+ vaddw.s8 q11, q11, d3
+
+ vmov.u8 q9, #4
+
+ @ filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d2, q2
+ vqmovn.s16 d3, q11
+ vand q1, q1, q15 @ filter &= mask
+
+ vqadd.s8 q2, q1, q4 @ filter2 = clamp(filter+3)
+ vqadd.s8 q1, q1, q9 @ filter1 = clamp(filter+4)
+ vshr.s8 q2, q2, #3 @ filter2 >>= 3
+ vshr.s8 q1, q1, #3 @ filter1 >>= 3
+
+
+ vqadd.s8 q11, q6, q2 @ u = clamp(ps0 + filter2)
+ vqsub.s8 q0, q7, q1 @ u = clamp(qs0 - filter1)
+
+ @ outer tap adjustments
+ vrshr.s8 q1, q1, #1 @ filter = ++filter1 >> 1
+
+ veor q7, q0, q10 @ *oq0 = u^0x80
+
+ vbic q1, q1, q14 @ filter &= ~hev
+
+ vqadd.s8 q13, q5, q1 @ u = clamp(ps1 + filter)
+ vqsub.s8 q12, q8, q1 @ u = clamp(qs1 - filter)
+
+ veor q6, q11, q10 @ *op0 = u^0x80
+ veor q5, q13, q10 @ *op1 = u^0x80
+ veor q8, q12, q10 @ *oq1 = u^0x80
+
+ bx lr
+ .size vp9_loop_filter_neon_16, .-vp9_loop_filter_neon_16 @ ENDP @ |vp9_loop_filter_neon_16|
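+
+@ Note: this helper works on q (128-bit) registers, filtering both 8-pixel
+@ halves of the dual call at once; the d-register helper
+@ vp9_loop_filter_neon in vp9_loopfilter_neon.S handles 8 pixels per call.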
+
+ .section .note.GNU-stack,"",%progbits
diff --git a/cpu_ref/vp9_loopfilter_neon.S b/cpu_ref/vp9_loopfilter_neon.S
new file mode 100644
index 0000000..2a38fda
--- /dev/null
+++ b/cpu_ref/vp9_loopfilter_neon.S
@@ -0,0 +1,735 @@
+@ This file was created from a .asm file
+@ using the ads2gas.pl script.
+ .equ DO1STROUNDING, 0
+@
+@ Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS. All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+@ Copyright (c) 2014 The Android Open Source Project
+@
+@ Licensed under the Apache License, Version 2.0 (the "License");
+@ you may not use this file except in compliance with the License.
+@ You may obtain a copy of the License at
+@
+@ http://www.apache.org/licenses/LICENSE-2.0
+@
+@ Unless required by applicable law or agreed to in writing, software
+@ distributed under the License is distributed on an "AS IS" BASIS,
+@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@ See the License for the specific language governing permissions and
+@ limitations under the License.
+
+ .global vp9_lpf_horizontal_4_neon
+ .type vp9_lpf_horizontal_4_neon, function
+ .global vp9_lpf_vertical_4_neon
+ .type vp9_lpf_vertical_4_neon, function
+ .global vp9_lpf_horizontal_8_neon
+ .type vp9_lpf_horizontal_8_neon, function
+ .global vp9_lpf_vertical_8_neon
+ .type vp9_lpf_vertical_8_neon, function
+ .arm
+
+.text
+.p2align 2
+
+@ Currently vp9 only works on 8 iterations at a time. The vp8 loop filter
+@ works on 16 iterations at a time.
+@ TODO(fgalligan): See about removing the count code as this function is only
+@ called with a count of 1.
+@
+@ void vp9_lpf_horizontal_4_neon(uint8_t *s,
+@ int p /* pitch */,
+@ const uint8_t *blimit,
+@ const uint8_t *limit,
+@ const uint8_t *thresh,
+@ int count)
+@
+@ r0 uint8_t *s,
+@ r1 int p, /* pitch */
+@ r2 const uint8_t *blimit,
+@ r3 const uint8_t *limit,
+@ sp const uint8_t *thresh,
+@ sp+4 int count
+_vp9_lpf_horizontal_4_neon:
+ vp9_lpf_horizontal_4_neon: @ PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] @ duplicate *blimit
+ ldr r12, [sp, #8] @ load count
+ ldr r2, [sp, #4] @ load thresh
+ add r1, r1, r1 @ double pitch
+
+ cmp r12, #0
+ beq end_vp9_lf_h_edge
+
+ vld1.8 {d1[]}, [r3] @ duplicate *limit
+ vld1.8 {d2[]}, [r2] @ duplicate *thresh
+
+count_lf_h_loop:
+ sub r2, r0, r1, lsl #1 @ move src pointer down by 4 lines
+ add r3, r2, r1, lsr #1 @ set to 3 lines down
+
+ vld1.u8 {d3}, [r2,:64], r1 @ p3
+ vld1.u8 {d4}, [r3,:64], r1 @ p2
+ vld1.u8 {d5}, [r2,:64], r1 @ p1
+ vld1.u8 {d6}, [r3,:64], r1 @ p0
+ vld1.u8 {d7}, [r2,:64], r1 @ q0
+ vld1.u8 {d16}, [r3,:64], r1 @ q1
+ vld1.u8 {d17}, [r2,:64] @ q2
+ vld1.u8 {d18}, [r3,:64] @ q3
+
+ sub r2, r2, r1, lsl #1
+ sub r3, r3, r1, lsl #1
+
+ bl vp9_loop_filter_neon
+
+ vst1.u8 {d4}, [r2,:64], r1 @ store op1
+ vst1.u8 {d5}, [r3,:64], r1 @ store op0
+ vst1.u8 {d6}, [r2,:64], r1 @ store oq0
+ vst1.u8 {d7}, [r3,:64], r1 @ store oq1
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_lf_h_loop
+
+end_vp9_lf_h_edge:
+ pop {pc}
+ .size vp9_lpf_horizontal_4_neon, .-vp9_lpf_horizontal_4_neon @ ENDP @ |vp9_lpf_horizontal_4_neon|
+
+@ Currently the vp9 loop filter here works on 8 pixels at a time,
+@ where the vp8 loop filter worked on 16 at a time.
+@ TODO(fgalligan): See about removing the count code as this function is only
+@ called with a count of 1.
+@
+@ void vp9_lpf_vertical_4_neon(uint8_t *s,
+@ int p /* pitch */,
+@ const uint8_t *blimit,
+@ const uint8_t *limit,
+@ const uint8_t *thresh,
+@ int count)
+@
+@ r0 uint8_t *s,
+@ r1 int p, /* pitch */
+@ r2 const uint8_t *blimit,
+@ r3 const uint8_t *limit,
+@ sp const uint8_t *thresh,
+@ sp+4 int count
+_vp9_lpf_vertical_4_neon:
+ vp9_lpf_vertical_4_neon: @ PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] @ duplicate *blimit
+ ldr r12, [sp, #8] @ load count
+ vld1.8 {d1[]}, [r3] @ duplicate *limit
+
+ ldr r3, [sp, #4] @ load thresh
+ sub r2, r0, #4 @ move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_lf_v_edge
+
+ vld1.8 {d2[]}, [r3] @ duplicate *thresh
+
+count_lf_v_loop:
+ vld1.u8 {d3}, [r2], r1 @ load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ @transpose to 8x8 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
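+
+ @ The three vtrn passes (32-, 16-, then 8-bit) form the standard NEON
+ @ 8x8 byte transpose: the eight loaded rows become columns, so
+ @ d3..d18 now hold the p3..q3 vectors vp9_loop_filter_neon expects.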
+
+ bl vp9_loop_filter_neon
+
+ sub r0, r0, #2
+
+ @store op1, op0, oq0, oq1
+ vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
+ vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
+ vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
+ vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
+ vst4.8 {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
+ vst4.8 {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
+ vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
+ vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
+
+ add r0, r0, r1, lsl #3 @ s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 @ move s pointer down by 4 columns
+ bne count_lf_v_loop
+
+end_vp9_lf_v_edge:
+ pop {pc}
+ .size vp9_lpf_vertical_4_neon, .-vp9_lpf_vertical_4_neon @ ENDP @ |vp9_lpf_vertical_4_neon|
+
+@ void vp9_loop_filter_neon();
+@ This is a helper function for the loopfilters. The individual functions do the
+@ necessary load, transpose (if necessary) and store. The function does not use
+@ registers d8-d15.
+@
+@ Inputs:
+@ r0-r3, r12 PRESERVE
+@ d0 blimit
+@ d1 limit
+@ d2 thresh
+@ d3 p3
+@ d4 p2
+@ d5 p1
+@ d6 p0
+@ d7 q0
+@ d16 q1
+@ d17 q2
+@ d18 q3
+@
+@ Outputs:
+@ d4 op1
+@ d5 op0
+@ d6 oq0
+@ d7 oq1
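+@
+@ For reference, the scalar math this vectorizes (condensed from vp9's
+@ C filter4; clamp8() is shorthand for signed_char_clamp and ps*/qs*
+@ are the pixels XORed with 0x80, i.e. biased to signed):
+@
+@   int8_t f = clamp8(ps1 - qs1) & hev;
+@   f = clamp8(f + 3 * (qs0 - ps0)) & mask;
+@   int8_t f1 = clamp8(f + 4) >> 3;
+@   int8_t f2 = clamp8(f + 3) >> 3;
+@   *oq0 = clamp8(qs0 - f1) ^ 0x80;
+@   *op0 = clamp8(ps0 + f2) ^ 0x80;
+@   f = ((f1 + 1) >> 1) & ~hev;
+@   *oq1 = clamp8(qs1 - f) ^ 0x80;
+@   *op1 = clamp8(ps1 + f) ^ 0x80;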
+_vp9_loop_filter_neon:
+ vp9_loop_filter_neon: @ PROC
+ @ filter_mask
+ vabd.u8 d19, d3, d4 @ m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 @ m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 @ m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 @ m4 = abs(q1 - q0)
+ vabd.u8 d3, d17, d16 @ m5 = abs(q2 - q1)
+ vabd.u8 d4, d18, d17 @ m6 = abs(q3 - q2)
+
+ @ only compare the largest value to limit
+ vmax.u8 d19, d19, d20 @ m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 @ m2 = max(m3, m4)
+
+ vabd.u8 d17, d6, d7 @ abs(p0 - q0)
+
+ vmax.u8 d3, d3, d4 @ m3 = max(m5, m6)
+
+ vmov.u8 d18, #0x80
+
+ vmax.u8 d23, d19, d20 @ m1 = max(m1, m2)
+
+ @ hevmask
+ vcgt.u8 d21, d21, d2 @ (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 d22, d22, d2 @ (abs(q1 - q0) > thresh)*-1
+ vmax.u8 d23, d23, d3 @ m1 = max(m1, m3)
+
+ vabd.u8 d28, d5, d16 @ a = abs(p1 - q1)
+ vqadd.u8 d17, d17, d17 @ b = abs(p0 - q0) * 2
+
+ veor d7, d7, d18 @ qs0
+
+ vcge.u8 d23, d1, d23 @ mask: m1 <= limit
+
+ @ filter() function
+ @ convert to signed
+
+ vshr.u8 d28, d28, #1 @ a = a / 2
+ veor d6, d6, d18 @ ps0
+
+ veor d5, d5, d18 @ ps1
+ vqadd.u8 d17, d17, d28 @ a = b + a
+
+ veor d16, d16, d18 @ qs1
+
+ vmov.u8 d19, #3
+
+ vsub.s8 d28, d7, d6 @ ( qs0 - ps0)
+
+ vcge.u8 d17, d0, d17 @ a <= blimit
+
+ vqsub.s8 d27, d5, d16 @ filter = clamp(ps1-qs1)
+ vorr d22, d21, d22 @ hevmask
+
+ vmull.s8 q12, d28, d19 @ 3 * ( qs0 - ps0)
+
+ vand d27, d27, d22 @ filter &= hev
+ vand d23, d23, d17 @ filter_mask
+
+ vaddw.s8 q12, q12, d27 @ filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d17, #4
+
+ @ filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d27, q12
+
+ vand d27, d27, d23 @ filter &= mask
+
+ vqadd.s8 d28, d27, d19 @ filter2 = clamp(filter+3)
+ vqadd.s8 d27, d27, d17 @ filter1 = clamp(filter+4)
+ vshr.s8 d28, d28, #3 @ filter2 >>= 3
+ vshr.s8 d27, d27, #3 @ filter1 >>= 3
+
+ vqadd.s8 d19, d6, d28 @ u = clamp(ps0 + filter2)
+ vqsub.s8 d26, d7, d27 @ u = clamp(qs0 - filter1)
+
+ @ outer tap adjustments
+ vrshr.s8 d27, d27, #1 @ filter = ++filter1 >> 1
+
+ veor d6, d26, d18 @ *oq0 = u^0x80
+
+ vbic d27, d27, d22 @ filter &= ~hev
+
+ vqadd.s8 d21, d5, d27 @ u = clamp(ps1 + filter)
+ vqsub.s8 d20, d16, d27 @ u = clamp(qs1 - filter)
+
+ veor d5, d19, d18 @ *op0 = u^0x80
+ veor d4, d21, d18 @ *op1 = u^0x80
+ veor d7, d20, d18 @ *oq1 = u^0x80
+
+ bx lr
+ .size vp9_loop_filter_neon, .-vp9_loop_filter_neon @ ENDP @ |vp9_loop_filter_neon|
+
+@ void vp9_lpf_horizontal_8_neon(uint8_t *s, int p,
+@ const uint8_t *blimit,
+@ const uint8_t *limit,
+@ const uint8_t *thresh,
+@ int count)
+@ r0 uint8_t *s,
+@ r1 int p, /* pitch */
+@ r2 const uint8_t *blimit,
+@ r3 const uint8_t *limit,
+@ sp const uint8_t *thresh,
+@ sp+4 int count
+_vp9_lpf_horizontal_8_neon:
+ vp9_lpf_horizontal_8_neon: @ PROC
+ push {r4-r5, lr}
+
+ vld1.8 {d0[]}, [r2] @ duplicate *blimit
+ ldr r12, [sp, #16] @ load count
+ ldr r2, [sp, #12] @ load thresh
+ add r1, r1, r1 @ double pitch
+
+ cmp r12, #0
+ beq end_vp9_mblf_h_edge
+
+ vld1.8 {d1[]}, [r3] @ duplicate *limit
+ vld1.8 {d2[]}, [r2] @ duplicate *thresh
+
+count_mblf_h_loop:
+ sub r3, r0, r1, lsl #1 @ move src pointer down by 4 lines
+ add r2, r3, r1, lsr #1 @ set to 3 lines down
+
+ vld1.u8 {d3}, [r3,:64], r1 @ p3
+ vld1.u8 {d4}, [r2,:64], r1 @ p2
+ vld1.u8 {d5}, [r3,:64], r1 @ p1
+ vld1.u8 {d6}, [r2,:64], r1 @ p0
+ vld1.u8 {d7}, [r3,:64], r1 @ q0
+ vld1.u8 {d16}, [r2,:64], r1 @ q1
+ vld1.u8 {d17}, [r3,:64] @ q2
+ vld1.u8 {d18}, [r2,:64], r1 @ q3
+
+ sub r3, r3, r1, lsl #1
+ sub r2, r2, r1, lsl #2
+
+ bl vp9_mbloop_filter_neon
+
+ vst1.u8 {d0}, [r2,:64], r1 @ store op2
+ vst1.u8 {d1}, [r3,:64], r1 @ store op1
+ vst1.u8 {d2}, [r2,:64], r1 @ store op0
+ vst1.u8 {d3}, [r3,:64], r1 @ store oq0
+ vst1.u8 {d4}, [r2,:64], r1 @ store oq1
+ vst1.u8 {d5}, [r3,:64], r1 @ store oq2
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_mblf_h_loop
+
+end_vp9_mblf_h_edge:
+ pop {r4-r5, pc}
+
+ .size vp9_lpf_horizontal_8_neon, .-vp9_lpf_horizontal_8_neon @ ENDP @ |vp9_lpf_horizontal_8_neon|
+
+@ void vp9_lpf_vertical_8_neon(uint8_t *s,
+@ int pitch,
+@ const uint8_t *blimit,
+@ const uint8_t *limit,
+@ const uint8_t *thresh,
+@ int count)
+@
+@ r0 uint8_t *s,
+@ r1 int pitch,
+@ r2 const uint8_t *blimit,
+@ r3 const uint8_t *limit,
+@ sp const uint8_t *thresh,
+@ sp+4 int count
+_vp9_lpf_vertical_8_neon:
+ vp9_lpf_vertical_8_neon: @ PROC
+ push {r4-r5, lr}
+
+ vld1.8 {d0[]}, [r2] @ duplicate *blimit
+ ldr r12, [sp, #16] @ load count
+ vld1.8 {d1[]}, [r3] @ duplicate *limit
+
+ ldr r3, [sp, #12] @ load thresh
+ sub r2, r0, #4 @ move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_mblf_v_edge
+
+ vld1.8 {d2[]}, [r3] @ duplicate *thresh
+
+count_mblf_v_loop:
+ vld1.u8 {d3}, [r2], r1 @ load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ @transpose to 8x8 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
+
+ sub r2, r0, #3
+ add r3, r0, #1
+
+ bl vp9_mbloop_filter_neon
+
+ @store op2, op1, op0, oq0
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r2], r1
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r2], r1
+ vst4.8 {d0[2], d1[2], d2[2], d3[2]}, [r2], r1
+ vst4.8 {d0[3], d1[3], d2[3], d3[3]}, [r2], r1
+ vst4.8 {d0[4], d1[4], d2[4], d3[4]}, [r2], r1
+ vst4.8 {d0[5], d1[5], d2[5], d3[5]}, [r2], r1
+ vst4.8 {d0[6], d1[6], d2[6], d3[6]}, [r2], r1
+ vst4.8 {d0[7], d1[7], d2[7], d3[7]}, [r2]
+
+ @store oq1, oq2
+ vst2.8 {d4[0], d5[0]}, [r3], r1
+ vst2.8 {d4[1], d5[1]}, [r3], r1
+ vst2.8 {d4[2], d5[2]}, [r3], r1
+ vst2.8 {d4[3], d5[3]}, [r3], r1
+ vst2.8 {d4[4], d5[4]}, [r3], r1
+ vst2.8 {d4[5], d5[5]}, [r3], r1
+ vst2.8 {d4[6], d5[6]}, [r3], r1
+ vst2.8 {d4[7], d5[7]}, [r3]
+
+ add r0, r0, r1, lsl #3 @ s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 @ move s pointer down by 4 columns
+ bne count_mblf_v_loop
+
+end_vp9_mblf_v_edge:
+ pop {r4-r5, pc}
+ .size vp9_lpf_vertical_8_neon, .-vp9_lpf_vertical_8_neon @ ENDP @ |vp9_lpf_vertical_8_neon|
+
+@ void vp9_mbloop_filter_neon();
+@ This is a helper function for the loopfilters. The individual functions do the
+@ necessary load, transpose (if necessary) and store. The function does not use
+@ registers d8-d15.
+@
+@ Inputs:
+@ r0-r3, r12 PRESERVE
+@ d0 blimit
+@ d1 limit
+@ d2 thresh
+@ d3 p3
+@ d4 p2
+@ d5 p1
+@ d6 p0
+@ d7 q0
+@ d16 q1
+@ d17 q2
+@ d18 q3
+@
+@ Outputs:
+@ d0 op2
+@ d1 op1
+@ d2 op0
+@ d3 oq0
+@ d4 oq1
+@ d5 oq2
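+@
+@ Scalar shape of what follows (condensed from vp9's C filter8; RND3(x)
+@ is shorthand for (x + 4) >> 3, not a real symbol):
+@
+@   flat = max(|p1-p0|, |q1-q0|, |p0-p2|, |q0-q2|, |p3-p0|, |q3-q0|) <= 1;
+@   if (flat && mask) {              /* per lane, blended via vbit/vbif */
+@     op2 = RND3(3*p3 + 2*p2 + p1 + p0 + q0);
+@     op1 = RND3(2*p3 + p2 + 2*p1 + p0 + q0 + q1);
+@     ...                            /* window slides down to oq2 */
+@   } else {
+@     /* the 4-tap filter() results are kept; p2/q2 pass through */
+@   }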
+_vp9_mbloop_filter_neon:
+ vp9_mbloop_filter_neon: @ PROC
+ @ filter_mask
+ vabd.u8 d19, d3, d4 @ m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 @ m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 @ m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 @ m4 = abs(q1 - q0)
+ vabd.u8 d23, d17, d16 @ m5 = abs(q2 - q1)
+ vabd.u8 d24, d18, d17 @ m6 = abs(q3 - q2)
+
+ @ only compare the largest value to limit
+ vmax.u8 d19, d19, d20 @ m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 @ m2 = max(m3, m4)
+
+ vabd.u8 d25, d6, d4 @ m7 = abs(p0 - p2)
+
+ vmax.u8 d23, d23, d24 @ m3 = max(m5, m6)
+
+ vabd.u8 d26, d7, d17 @ m8 = abs(q0 - q2)
+
+ vmax.u8 d19, d19, d20
+
+ vabd.u8 d24, d6, d7 @ m9 = abs(p0 - q0)
+ vabd.u8 d27, d3, d6 @ m10 = abs(p3 - p0)
+ vabd.u8 d28, d18, d7 @ m11 = abs(q3 - q0)
+
+ vmax.u8 d19, d19, d23
+
+ vabd.u8 d23, d5, d16 @ a = abs(p1 - q1)
+ vqadd.u8 d24, d24, d24 @ b = abs(p0 - q0) * 2
+
+ @ mask: largest abs diff <= limit
+ vcge.u8 d19, d1, d19
+
+ @ only compare the largest value to thresh
+ vmax.u8 d25, d25, d26 @ m4 = max(m7, m8)
+ vmax.u8 d26, d27, d28 @ m5 = max(m10, m11)
+
+ vshr.u8 d23, d23, #1 @ a = a / 2
+
+ vmax.u8 d25, d25, d26 @ m4 = max(m4, m5)
+
+ vqadd.u8 d24, d24, d23 @ a = b + a
+
+ vmax.u8 d20, d20, d25 @ m2 = max(m2, m4)
+
+ vmov.u8 d23, #1
+ vcge.u8 d24, d0, d24 @ a <= blimit
+
+ vcgt.u8 d21, d21, d2 @ (abs(p1 - p0) > thresh)*-1
+
+ vcge.u8 d20, d23, d20 @ flat
+
+ vand d19, d19, d24 @ mask
+
+ vcgt.u8 d23, d22, d2 @ (abs(q1 - q0) > thresh)*-1
+
+ vand d20, d20, d19 @ flat & mask
+
+ vmov.u8 d22, #0x80
+
+ vorr d23, d21, d23 @ hev
+
+ @ This instruction will truncate the "flat & mask" masks down to 4 bits
+ @ each to fit into one 32 bit arm register. The values are stored in
+ @ q10.64[0].
+ vshrn.u16 d30, q10, #4
+ vmov.u32 r4, d30[0] @ flat & mask 4bits
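+ @ e.g. eight 0xFF lanes pack to r4 == 0xFFFFFFFF, while any 0x00 lane
+ @ leaves a zero nibble, so r4 + 1 == 0 only when every lane is set
+ @ and r4 == 0 only when none are.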
+
+ adds r5, r4, #1 @ Check for all 1's
+
+ @ If mask and flat are 1's for all vectors, then we only need to execute
+ @ the power branch for all vectors.
+ beq power_branch_only
+
+ cmp r4, #0 @ Check for 0, set flag for later
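+
+ @ Three-way strategy: all lanes set branched to power_branch_only
+ @ above (7-tap only); all lanes clear (the flags from this cmp survive
+ @ the NEON ops below) takes filter_branch_only after the 4-tap math;
+ @ a mixed mask falls through and blends both results with vbit/vbif
+ @ on d20.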
+
+ @ mbfilter() function
+ @ filter() function
+ @ convert to signed
+ veor d21, d7, d22 @ qs0
+ veor d24, d6, d22 @ ps0
+ veor d25, d5, d22 @ ps1
+ veor d26, d16, d22 @ qs1
+
+ vmov.u8 d27, #3
+
+ vsub.s8 d28, d21, d24 @ ( qs0 - ps0)
+
+ vqsub.s8 d29, d25, d26 @ filter = clamp(ps1-qs1)
+
+ vmull.s8 q15, d28, d27 @ 3 * ( qs0 - ps0)
+
+ vand d29, d29, d23 @ filter &= hev
+
+ vaddw.s8 q15, q15, d29 @ filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d29, #4
+
+ @ filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d28, q15
+
+ vand d28, d28, d19 @ filter &= mask
+
+ vqadd.s8 d30, d28, d27 @ filter2 = clamp(filter+3)
+ vqadd.s8 d29, d28, d29 @ filter1 = clamp(filter+4)
+ vshr.s8 d30, d30, #3 @ filter2 >>= 3
+ vshr.s8 d29, d29, #3 @ filter1 >>= 3
+
+ vqadd.s8 d24, d24, d30 @ op0 = clamp(ps0 + filter2)
+ vqsub.s8 d21, d21, d29 @ oq0 = clamp(qs0 - filter1)
+
+ @ outer tap adjustments: ++filter1 >> 1
+ vrshr.s8 d29, d29, #1
+ vbic d29, d29, d23 @ filter &= ~hev
+
+ vqadd.s8 d25, d25, d29 @ op1 = clamp(ps1 + filter)
+ vqsub.s8 d26, d26, d29 @ oq1 = clamp(qs1 - filter)
+
+ @ If mask and flat are 0's for all vectors, then we only need to execute
+ @ the filter branch for all vectors.
+ beq filter_branch_only
+
+ @ If mask and flat are mixed then we must perform both branches and
+ @ combine the data.
+ veor d24, d24, d22 @ *f_op0 = u^0x80
+ veor d21, d21, d22 @ *f_oq0 = u^0x80
+ veor d25, d25, d22 @ *f_op1 = u^0x80
+ veor d26, d26, d22 @ *f_oq1 = u^0x80
+
+ @ At this point we have already executed the filter branch. The filter
+ @ branch does not set op2 or oq2, so use p2 and q2. Execute the power
+ @ branch and combine the data.
+ vmov.u8 d23, #2
+ vaddl.u8 q14, d6, d7 @ r_op2 = p0 + q0
+ vmlal.u8 q14, d3, d27 @ r_op2 += p3 * 3
+ vmlal.u8 q14, d4, d23 @ r_op2 += p2 * 2
+
+ vbif d0, d4, d20 @ op2 |= p2 & ~(flat & mask)
+
+ vaddw.u8 q14, d5 @ r_op2 += p1
+
+ vbif d1, d25, d20 @ op1 |= f_op1 & ~(flat & mask)
+
+ vqrshrn.u16 d30, q14, #3 @ r_op2
+
+ vsubw.u8 q14, d3 @ r_op1 = r_op2 - p3
+ vsubw.u8 q14, d4 @ r_op1 -= p2
+ vaddw.u8 q14, d5 @ r_op1 += p1
+ vaddw.u8 q14, d16 @ r_op1 += q1
+
+ vbif d2, d24, d20 @ op0 |= f_op0 & ~(flat & mask)
+
+ vqrshrn.u16 d31, q14, #3 @ r_op1
+
+ vsubw.u8 q14, d3 @ r_op0 = r_op1 - p3
+ vsubw.u8 q14, d5 @ r_op0 -= p1
+ vaddw.u8 q14, d6 @ r_op0 += p0
+ vaddw.u8 q14, d17 @ r_op0 += q2
+
+ vbit d0, d30, d20 @ op2 |= r_op2 & (flat & mask)
+
+ vqrshrn.u16 d23, q14, #3 @ r_op0
+
+ vsubw.u8 q14, d3 @ r_oq0 = r_op0 - p3
+ vsubw.u8 q14, d6 @ r_oq0 -= p0
+ vaddw.u8 q14, d7 @ r_oq0 += q0
+
+ vbit d1, d31, d20 @ op1 |= r_op1 & (flat & mask)
+
+ vaddw.u8 q14, d18 @ oq0 += q3
+
+ vbit d2, d23, d20 @ op0 |= r_op0 & (flat & mask)
+
+ vqrshrn.u16 d22, q14, #3 @ r_oq0
+
+ vsubw.u8 q14, d4 @ r_oq1 = r_oq0 - p2
+ vsubw.u8 q14, d7 @ r_oq1 -= q0
+ vaddw.u8 q14, d16 @ r_oq1 += q1
+
+ vbif d3, d21, d20 @ oq0 |= f_oq0 & ~(flat & mask)
+
+ vaddw.u8 q14, d18 @ r_oq1 += q3
+
+ vbif d4, d26, d20 @ oq1 |= f_oq1 & ~(flat & mask)
+
+ vqrshrn.u16 d6, q14, #3 @ r_oq1
+
+ vsubw.u8 q14, d5 @ r_oq2 = r_oq1 - p1
+ vsubw.u8 q14, d16 @ r_oq2 -= q1
+ vaddw.u8 q14, d17 @ r_oq2 += q2
+ vaddw.u8 q14, d18 @ r_oq2 += q3
+
+ vbif d5, d17, d20 @ oq2 |= q2 & ~(flat & mask)
+
+ vqrshrn.u16 d7, q14, #3 @ r_oq2
+
+ vbit d3, d22, d20 @ oq0 |= r_oq0 & (flat & mask)
+ vbit d4, d6, d20 @ oq1 |= r_oq1 & (flat & mask)
+ vbit d5, d7, d20 @ oq2 |= r_oq2 & (flat & mask)
+
+ bx lr
+
+power_branch_only:
+ vmov.u8 d27, #3
+ vmov.u8 d21, #2
+ vaddl.u8 q14, d6, d7 @ op2 = p0 + q0
+ vmlal.u8 q14, d3, d27 @ op2 += p3 * 3
+ vmlal.u8 q14, d4, d21 @ op2 += p2 * 2
+ vaddw.u8 q14, d5 @ op2 += p1
+ vqrshrn.u16 d0, q14, #3 @ op2
+
+ vsubw.u8 q14, d3 @ op1 = op2 - p3
+ vsubw.u8 q14, d4 @ op1 -= p2
+ vaddw.u8 q14, d5 @ op1 += p1
+ vaddw.u8 q14, d16 @ op1 += q1
+ vqrshrn.u16 d1, q14, #3 @ op1
+
+ vsubw.u8 q14, d3 @ op0 = op1 - p3
+ vsubw.u8 q14, d5 @ op0 -= p1
+ vaddw.u8 q14, d6 @ op0 += p0
+ vaddw.u8 q14, d17 @ op0 += q2
+ vqrshrn.u16 d2, q14, #3 @ op0
+
+ vsubw.u8 q14, d3 @ oq0 = op0 - p3
+ vsubw.u8 q14, d6 @ oq0 -= p0
+ vaddw.u8 q14, d7 @ oq0 += q0
+ vaddw.u8 q14, d18 @ oq0 += q3
+ vqrshrn.u16 d3, q14, #3 @ oq0
+
+ vsubw.u8 q14, d4 @ oq1 = oq0 - p2
+ vsubw.u8 q14, d7 @ oq1 -= q0
+ vaddw.u8 q14, d16 @ oq1 += q1
+ vaddw.u8 q14, d18 @ oq1 += q3
+ vqrshrn.u16 d4, q14, #3 @ oq1
+
+ vsubw.u8 q14, d5 @ oq2 = oq1 - p1
+ vsubw.u8 q14, d16 @ oq2 -= q1
+ vaddw.u8 q14, d17 @ oq2 += q2
+ vaddw.u8 q14, d18 @ oq2 += q3
+ vqrshrn.u16 d5, q14, #3 @ oq2
+
+ bx lr
+
+filter_branch_only:
+ @ TODO(fgalligan): See if we can rearrange registers so we do not need to
+ @ do the 2 vswp.
+ vswp d0, d4 @ op2
+ vswp d5, d17 @ oq2
+ veor d2, d24, d22 @ *op0 = u^0x80
+ veor d3, d21, d22 @ *oq0 = u^0x80
+ veor d1, d25, d22 @ *op1 = u^0x80
+ veor d4, d26, d22 @ *oq1 = u^0x80
+
+ bx lr
+
+ .size vp9_mbloop_filter_neon, .-vp9_mbloop_filter_neon @ ENDP @ |vp9_mbloop_filter_neon|
+
+ .section .note.GNU-stack,"",%progbits
diff --git a/cpu_ref/vp9_mb_lpf_neon.S b/cpu_ref/vp9_mb_lpf_neon.S
new file mode 100644
index 0000000..ea20cba
--- /dev/null
+++ b/cpu_ref/vp9_mb_lpf_neon.S
@@ -0,0 +1,625 @@
+@ This file was created from a .asm file
+@ using the ads2gas.pl script.
+ .equ DO1STROUNDING, 0
+@
+@ Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS. All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+@ Copyright (c) 2014 The Android Open Source Project
+@
+@ Licensed under the Apache License, Version 2.0 (the "License")
+@ you may not use this file except in compliance with the License.
+@ You may obtain a copy of the License at
+@
+@ http://www.apache.org/licenses/LICENSE-2.0
+@
+@ Unless required by applicable law or agreed to in writing, software
+@ distributed under the License is distributed on an "AS IS" BASIS,
+@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@ See the License for the specific language governing permissions and
+@ limitations under the License.
+
+ .global vp9_lpf_horizontal_16_neon
+ .type vp9_lpf_horizontal_16_neon, function
+ .global vp9_lpf_vertical_16_neon
+ .type vp9_lpf_vertical_16_neon, function
+ .arm
+
+.text
+.p2align 2
+
+@ void vp9_lpf_horizontal_16_neon(uint8_t *s, int p,
+@ const uint8_t *blimit,
+@ const uint8_t *limit,
+@ const uint8_t *thresh,
+@ int count)
+@ r0 uint8_t *s,
+@ r1 int p, /* pitch */
+@ r2 const uint8_t *blimit,
+@ r3 const uint8_t *limit,
+@ sp const uint8_t *thresh,
+@ sp+4 int count
+_vp9_lpf_horizontal_16_neon:
+ vp9_lpf_horizontal_16_neon: @ PROC
+ push {r4-r8, lr}
+ vpush {d8-d15}
+ ldr r4, [sp, #88] @ load thresh
+ ldr r12, [sp, #92] @ load count
+
+h_count:
+ vld1.8 {d16[]}, [r2] @ load *blimit
+ vld1.8 {d17[]}, [r3] @ load *limit
+ vld1.8 {d18[]}, [r4] @ load *thresh
+
+ sub r8, r0, r1, lsl #3 @ move src pointer down by 8 lines
+
+ vld1.u8 {d0}, [r8,:64], r1 @ p7
+ vld1.u8 {d1}, [r8,:64], r1 @ p6
+ vld1.u8 {d2}, [r8,:64], r1 @ p5
+ vld1.u8 {d3}, [r8,:64], r1 @ p4
+ vld1.u8 {d4}, [r8,:64], r1 @ p3
+ vld1.u8 {d5}, [r8,:64], r1 @ p2
+ vld1.u8 {d6}, [r8,:64], r1 @ p1
+ vld1.u8 {d7}, [r8,:64], r1 @ p0
+ vld1.u8 {d8}, [r8,:64], r1 @ q0
+ vld1.u8 {d9}, [r8,:64], r1 @ q1
+ vld1.u8 {d10}, [r8,:64], r1 @ q2
+ vld1.u8 {d11}, [r8,:64], r1 @ q3
+ vld1.u8 {d12}, [r8,:64], r1 @ q4
+ vld1.u8 {d13}, [r8,:64], r1 @ q5
+ vld1.u8 {d14}, [r8,:64], r1 @ q6
+ vld1.u8 {d15}, [r8,:64], r1 @ q7
+
+ bl vp9_wide_mbfilter_neon
+
+ tst r7, #1
+ beq h_mbfilter
+
+ @ flat && mask were not set for any of the channels. Just store the values
+ @ from filter.
+ sub r8, r0, r1, lsl #1
+
+ vst1.u8 {d25}, [r8,:64], r1 @ store op1
+ vst1.u8 {d24}, [r8,:64], r1 @ store op0
+ vst1.u8 {d23}, [r8,:64], r1 @ store oq0
+ vst1.u8 {d26}, [r8,:64], r1 @ store oq1
+
+ b h_next
+
+h_mbfilter:
+ tst r7, #2
+ beq h_wide_mbfilter
+
+ @ flat2 was not set for any of the channels. Just store the values from
+ @ mbfilter.
+ sub r8, r0, r1, lsl #1
+ sub r8, r8, r1
+
+ vst1.u8 {d18}, [r8,:64], r1 @ store op2
+ vst1.u8 {d19}, [r8,:64], r1 @ store op1
+ vst1.u8 {d20}, [r8,:64], r1 @ store op0
+ vst1.u8 {d21}, [r8,:64], r1 @ store oq0
+ vst1.u8 {d22}, [r8,:64], r1 @ store oq1
+ vst1.u8 {d23}, [r8,:64], r1 @ store oq2
+
+ b h_next
+
+h_wide_mbfilter:
+ sub r8, r0, r1, lsl #3
+ add r8, r8, r1
+
+ vst1.u8 {d16}, [r8,:64], r1 @ store op6
+ vst1.u8 {d24}, [r8,:64], r1 @ store op5
+ vst1.u8 {d25}, [r8,:64], r1 @ store op4
+ vst1.u8 {d26}, [r8,:64], r1 @ store op3
+ vst1.u8 {d27}, [r8,:64], r1 @ store op2
+ vst1.u8 {d18}, [r8,:64], r1 @ store op1
+ vst1.u8 {d19}, [r8,:64], r1 @ store op0
+ vst1.u8 {d20}, [r8,:64], r1 @ store oq0
+ vst1.u8 {d21}, [r8,:64], r1 @ store oq1
+ vst1.u8 {d22}, [r8,:64], r1 @ store oq2
+ vst1.u8 {d23}, [r8,:64], r1 @ store oq3
+ vst1.u8 {d1}, [r8,:64], r1 @ store oq4
+ vst1.u8 {d2}, [r8,:64], r1 @ store oq5
+ vst1.u8 {d3}, [r8,:64], r1 @ store oq6
+
+h_next:
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne h_count
+
+ vpop {d8-d15}
+ pop {r4-r8, pc}
+
+ .size vp9_lpf_horizontal_16_neon, .-vp9_lpf_horizontal_16_neon @ ENDP @ |vp9_lpf_horizontal_16_neon|
+
+@ void vp9_lpf_vertical_16_neon(uint8_t *s, int p,
+@ const uint8_t *blimit,
+@ const uint8_t *limit,
+@ const uint8_t *thresh)
+@ r0 uint8_t *s,
+@ r1 int p, /* pitch */
+@ r2 const uint8_t *blimit,
+@ r3 const uint8_t *limit,
+@ sp const uint8_t *thresh,
+_vp9_lpf_vertical_16_neon:
+ vp9_lpf_vertical_16_neon: @ PROC
+ push {r4-r8, lr}
+ vpush {d8-d15}
+ ldr r4, [sp, #88] @ load thresh
+
+ vld1.8 {d16[]}, [r2] @ load *blimit
+ vld1.8 {d17[]}, [r3] @ load *limit
+ vld1.8 {d18[]}, [r4] @ load *thresh
+
+ sub r8, r0, #8
+
+ vld1.8 {d0}, [r8,:64], r1
+ vld1.8 {d8}, [r0,:64], r1
+ vld1.8 {d1}, [r8,:64], r1
+ vld1.8 {d9}, [r0,:64], r1
+ vld1.8 {d2}, [r8,:64], r1
+ vld1.8 {d10}, [r0,:64], r1
+ vld1.8 {d3}, [r8,:64], r1
+ vld1.8 {d11}, [r0,:64], r1
+ vld1.8 {d4}, [r8,:64], r1
+ vld1.8 {d12}, [r0,:64], r1
+ vld1.8 {d5}, [r8,:64], r1
+ vld1.8 {d13}, [r0,:64], r1
+ vld1.8 {d6}, [r8,:64], r1
+ vld1.8 {d14}, [r0,:64], r1
+ vld1.8 {d7}, [r8,:64], r1
+ vld1.8 {d15}, [r0,:64], r1
+
+ sub r0, r0, r1, lsl #3
+
+ vtrn.32 q0, q2
+ vtrn.32 q1, q3
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+
+ vtrn.16 q0, q1
+ vtrn.16 q2, q3
+ vtrn.16 q4, q5
+ vtrn.16 q6, q7
+
+ vtrn.8 d0, d1
+ vtrn.8 d2, d3
+ vtrn.8 d4, d5
+ vtrn.8 d6, d7
+
+ vtrn.8 d8, d9
+ vtrn.8 d10, d11
+ vtrn.8 d12, d13
+ vtrn.8 d14, d15
+
+ bl vp9_wide_mbfilter_neon
+
+ tst r7, #1
+ beq v_mbfilter
+
+ @ flat && mask were not set for any of the channels. Just store the values
+ @ from filter.
+ sub r8, r0, #2
+
+ vswp d23, d25
+
+ vst4.8 {d23[0], d24[0], d25[0], d26[0]}, [r8], r1
+ vst4.8 {d23[1], d24[1], d25[1], d26[1]}, [r8], r1
+ vst4.8 {d23[2], d24[2], d25[2], d26[2]}, [r8], r1
+ vst4.8 {d23[3], d24[3], d25[3], d26[3]}, [r8], r1
+ vst4.8 {d23[4], d24[4], d25[4], d26[4]}, [r8], r1
+ vst4.8 {d23[5], d24[5], d25[5], d26[5]}, [r8], r1
+ vst4.8 {d23[6], d24[6], d25[6], d26[6]}, [r8], r1
+ vst4.8 {d23[7], d24[7], d25[7], d26[7]}, [r8], r1
+
+ b v_end
+
+v_mbfilter:
+ tst r7, #2
+ beq v_wide_mbfilter
+
+ @ flat2 was not set for any of the channels. Just store the values from
+ @ mbfilter.
+ sub r8, r0, #3
+
+ vst3.8 {d18[0], d19[0], d20[0]}, [r8], r1
+ vst3.8 {d21[0], d22[0], d23[0]}, [r0], r1
+ vst3.8 {d18[1], d19[1], d20[1]}, [r8], r1
+ vst3.8 {d21[1], d22[1], d23[1]}, [r0], r1
+ vst3.8 {d18[2], d19[2], d20[2]}, [r8], r1
+ vst3.8 {d21[2], d22[2], d23[2]}, [r0], r1
+ vst3.8 {d18[3], d19[3], d20[3]}, [r8], r1
+ vst3.8 {d21[3], d22[3], d23[3]}, [r0], r1
+ vst3.8 {d18[4], d19[4], d20[4]}, [r8], r1
+ vst3.8 {d21[4], d22[4], d23[4]}, [r0], r1
+ vst3.8 {d18[5], d19[5], d20[5]}, [r8], r1
+ vst3.8 {d21[5], d22[5], d23[5]}, [r0], r1
+ vst3.8 {d18[6], d19[6], d20[6]}, [r8], r1
+ vst3.8 {d21[6], d22[6], d23[6]}, [r0], r1
+ vst3.8 {d18[7], d19[7], d20[7]}, [r8], r1
+ vst3.8 {d21[7], d22[7], d23[7]}, [r0], r1
+
+ b v_end
+
+v_wide_mbfilter:
+ sub r8, r0, #8
+
+ vtrn.32 d0, d26
+ vtrn.32 d16, d27
+ vtrn.32 d24, d18
+ vtrn.32 d25, d19
+
+ vtrn.16 d0, d24
+ vtrn.16 d16, d25
+ vtrn.16 d26, d18
+ vtrn.16 d27, d19
+
+ vtrn.8 d0, d16
+ vtrn.8 d24, d25
+ vtrn.8 d26, d27
+ vtrn.8 d18, d19
+
+ vtrn.32 d20, d1
+ vtrn.32 d21, d2
+ vtrn.32 d22, d3
+ vtrn.32 d23, d15
+
+ vtrn.16 d20, d22
+ vtrn.16 d21, d23
+ vtrn.16 d1, d3
+ vtrn.16 d2, d15
+
+ vtrn.8 d20, d21
+ vtrn.8 d22, d23
+ vtrn.8 d1, d2
+ vtrn.8 d3, d15
+
+ vst1.8 {d0}, [r8,:64], r1
+ vst1.8 {d20}, [r0,:64], r1
+ vst1.8 {d16}, [r8,:64], r1
+ vst1.8 {d21}, [r0,:64], r1
+ vst1.8 {d24}, [r8,:64], r1
+ vst1.8 {d22}, [r0,:64], r1
+ vst1.8 {d25}, [r8,:64], r1
+ vst1.8 {d23}, [r0,:64], r1
+ vst1.8 {d26}, [r8,:64], r1
+ vst1.8 {d1}, [r0,:64], r1
+ vst1.8 {d27}, [r8,:64], r1
+ vst1.8 {d2}, [r0,:64], r1
+ vst1.8 {d18}, [r8,:64], r1
+ vst1.8 {d3}, [r0,:64], r1
+ vst1.8 {d19}, [r8,:64], r1
+ vst1.8 {d15}, [r0,:64], r1
+
+v_end:
+ vpop {d8-d15}
+ pop {r4-r8, pc}
+
+ .size vp9_lpf_vertical_16_neon, .-vp9_lpf_vertical_16_neon @ ENDP @ |vp9_lpf_vertical_16_neon|
+
+@ void vp9_wide_mbfilter_neon();
+@ This is a helper function for the loopfilters. The individual functions do the
+@ necessary load, transpose (if necessary) and store.
+@
+@ r0-r3 PRESERVE
+@ d16 blimit
+@ d17 limit
+@ d18 thresh
+@ d0 p7
+@ d1 p6
+@ d2 p5
+@ d3 p4
+@ d4 p3
+@ d5 p2
+@ d6 p1
+@ d7 p0
+@ d8 q0
+@ d9 q1
+@ d10 q2
+@ d11 q3
+@ d12 q4
+@ d13 q5
+@ d14 q6
+@ d15 q7
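+@
+@ Scalar shape of the wide path (condensed from vp9's C filter16;
+@ RND4(x) is shorthand for (x + 8) >> 4, not a real symbol):
+@
+@   flat2 = max of |p7-p0| .. |p4-p0| and |q4-q0| .. |q7-q0| <= 1;
+@   if (flat2 && flat && mask)       /* per lane, blended via vbif */
+@     op6 = RND4(7*p7 + 2*p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0);
+@     ...                            /* 15-tap window slides down to oq6 */
+@   else the 8-tap/4-tap results already computed are kept.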
+_vp9_wide_mbfilter_neon:
+ vp9_wide_mbfilter_neon: @ PROC
+ mov r7, #0
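+
+ @ r7 collects branch flags, tested below and by the callers: bit 0 set
+ @ means flat && mask is zero in every lane (only the 4-tap outputs
+ @ matter); bit 1 set means flat2 && flat && mask is zero everywhere
+ @ (the wide path is skipped).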
+
+ @ filter_mask
+ vabd.u8 d19, d4, d5 @ abs(p3 - p2)
+ vabd.u8 d20, d5, d6 @ abs(p2 - p1)
+ vabd.u8 d21, d6, d7 @ abs(p1 - p0)
+ vabd.u8 d22, d9, d8 @ abs(q1 - q0)
+ vabd.u8 d23, d10, d9 @ abs(q2 - q1)
+ vabd.u8 d24, d11, d10 @ abs(q3 - q2)
+
+ @ only compare the largest value to limit
+ vmax.u8 d19, d19, d20 @ max(abs(p3 - p2), abs(p2 - p1))
+ vmax.u8 d20, d21, d22 @ max(abs(p1 - p0), abs(q1 - q0))
+ vmax.u8 d23, d23, d24 @ max(abs(q2 - q1), abs(q3 - q2))
+ vmax.u8 d19, d19, d20
+
+ vabd.u8 d24, d7, d8 @ abs(p0 - q0)
+
+ vmax.u8 d19, d19, d23
+
+ vabd.u8 d23, d6, d9 @ a = abs(p1 - q1)
+ vqadd.u8 d24, d24, d24 @ b = abs(p0 - q0) * 2
+
+ @ mask: largest abs diff <= limit
+ vcge.u8 d19, d17, d19
+
+ @ flatmask4
+ vabd.u8 d25, d7, d5 @ abs(p0 - p2)
+ vabd.u8 d26, d8, d10 @ abs(q0 - q2)
+ vabd.u8 d27, d4, d7 @ abs(p3 - p0)
+ vabd.u8 d28, d11, d8 @ abs(q3 - q0)
+
+ @ only compare the largest value to thresh
+ vmax.u8 d25, d25, d26 @ max(abs(p0 - p2), abs(q0 - q2))
+ vmax.u8 d26, d27, d28 @ max(abs(p3 - p0), abs(q3 - q0))
+ vmax.u8 d25, d25, d26
+ vmax.u8 d20, d20, d25
+
+ vshr.u8 d23, d23, #1 @ a = a / 2
+ vqadd.u8 d24, d24, d23 @ a = b + a
+
+ vmov.u8 d30, #1
+ vcge.u8 d24, d16, d24 @ (a <= blimit) * -1
+
+ vcge.u8 d20, d30, d20 @ flat
+
+ vand d19, d19, d24 @ mask
+
+ @ hevmask
+ vcgt.u8 d21, d21, d18 @ (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 d22, d22, d18 @ (abs(q1 - q0) > thresh)*-1
+ vorr d21, d21, d22 @ hev
+
+ vand d16, d20, d19 @ flat && mask
+ vmov r5, r6, d16
+
+ @ flatmask5(1, p7, p6, p5, p4, p0, q0, q4, q5, q6, q7)
+ vabd.u8 d22, d3, d7 @ abs(p4 - p0)
+ vabd.u8 d23, d12, d8 @ abs(q4 - q0)
+ vabd.u8 d24, d7, d2 @ abs(p0 - p5)
+ vabd.u8 d25, d8, d13 @ abs(q0 - q5)
+ vabd.u8 d26, d1, d7 @ abs(p6 - p0)
+ vabd.u8 d27, d14, d8 @ abs(q6 - q0)
+ vabd.u8 d28, d0, d7 @ abs(p7 - p0)
+ vabd.u8 d29, d15, d8 @ abs(q7 - q0)
+
+ @ only compare the largest value to thresh
+ vmax.u8 d22, d22, d23 @ max(abs(p4 - p0), abs(q4 - q0))
+ vmax.u8 d23, d24, d25 @ max(abs(p0 - p5), abs(q0 - q5))
+ vmax.u8 d24, d26, d27 @ max(abs(p6 - p0), abs(q6 - q0))
+ vmax.u8 d25, d28, d29 @ max(abs(p7 - p0), abs(q7 - q0))
+
+ vmax.u8 d26, d22, d23
+ vmax.u8 d27, d24, d25
+ vmax.u8 d23, d26, d27
+
+ vcge.u8 d18, d30, d23 @ flat2
+
+ vmov.u8 d22, #0x80
+
+ orrs r5, r5, r6 @ Check for 0
+ orreq r7, r7, #1 @ Only do filter branch
+
+ vand d17, d18, d16 @ flat2 && flat && mask
+ vmov r5, r6, d17
+
+ @ mbfilter() function
+
+ @ filter() function
+ @ convert to signed
+ veor d23, d8, d22 @ qs0
+ veor d24, d7, d22 @ ps0
+ veor d25, d6, d22 @ ps1
+ veor d26, d9, d22 @ qs1
+
+ vmov.u8 d27, #3
+
+ vsub.s8 d28, d23, d24 @ ( qs0 - ps0)
+ vqsub.s8 d29, d25, d26 @ filter = clamp(ps1-qs1)
+ vmull.s8 q15, d28, d27 @ 3 * ( qs0 - ps0)
+ vand d29, d29, d21 @ filter &= hev
+ vaddw.s8 q15, q15, d29 @ filter + 3 * (qs0 - ps0)
+ vmov.u8 d29, #4
+
+ @ filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d28, q15
+
+ vand d28, d28, d19 @ filter &= mask
+
+ vqadd.s8 d30, d28, d27 @ filter2 = clamp(filter+3)
+ vqadd.s8 d29, d28, d29 @ filter1 = clamp(filter+4)
+ vshr.s8 d30, d30, #3 @ filter2 >>= 3
+ vshr.s8 d29, d29, #3 @ filter1 >>= 3
+
+ vqadd.s8 d24, d24, d30 @ op0 = clamp(ps0 + filter2)
+ vqsub.s8 d23, d23, d29 @ oq0 = clamp(qs0 - filter1)
+
+ @ outer tap adjustments: ++filter1 >> 1
+ vrshr.s8 d29, d29, #1
+ vbic d29, d29, d21 @ filter &= ~hev
+
+ vqadd.s8 d25, d25, d29 @ op1 = clamp(ps1 + filter)
+ vqsub.s8 d26, d26, d29 @ oq1 = clamp(qs1 - filter)
+
+ veor d24, d24, d22 @ *f_op0 = u^0x80
+ veor d23, d23, d22 @ *f_oq0 = u^0x80
+ veor d25, d25, d22 @ *f_op1 = u^0x80
+ veor d26, d26, d22 @ *f_oq1 = u^0x80
+
+ tst r7, #1
+ bxne lr
+
+ orrs r5, r5, r6 @ Check for 0
+ orreq r7, r7, #2 @ Only do mbfilter branch
+
+ @ mbfilter flat && mask branch
+ @ TODO(fgalligan): Can I decrease the cycles shifting to consecutive d's
+ @ and using vbit on the q's?
+ vmov.u8 d29, #2
+ vaddl.u8 q15, d7, d8 @ op2 = p0 + q0
+ vmlal.u8 q15, d4, d27 @ op2 = p0 + q0 + p3 * 3
+ vmlal.u8 q15, d5, d29 @ op2 = p0 + q0 + p3 * 3 + p2 * 2
+ vaddl.u8 q10, d4, d5
+ vaddw.u8 q15, d6 @ op2=p1 + p0 + q0 + p3 * 3 + p2 *2
+ vaddl.u8 q14, d6, d9
+ vqrshrn.u16 d18, q15, #3 @ r_op2
+
+ vsub.i16 q15, q10
+ vaddl.u8 q10, d4, d6
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d7, d10
+ vqrshrn.u16 d19, q15, #3 @ r_op1
+
+ vsub.i16 q15, q10
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d8, d11
+ vqrshrn.u16 d20, q15, #3 @ r_op0
+
+ vsubw.u8 q15, d4 @ oq0 = op0 - p3
+ vsubw.u8 q15, d7 @ oq0 -= p0
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d9, d11
+ vqrshrn.u16 d21, q15, #3 @ r_oq0
+
+ vsubw.u8 q15, d5 @ oq1 = oq0 - p2
+ vsubw.u8 q15, d8 @ oq1 -= q0
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d10, d11
+ vqrshrn.u16 d22, q15, #3 @ r_oq1
+
+ vsubw.u8 q15, d6 @ oq2 = oq0 - p1
+ vsubw.u8 q15, d9 @ oq2 -= q1
+ vadd.i16 q15, q14
+ vqrshrn.u16 d27, q15, #3 @ r_oq2
+
+ @ Filter does not set op2 or oq2, so use p2 and q2.
+ vbif d18, d5, d16 @ t_op2 |= p2 & ~(flat & mask)
+ vbif d19, d25, d16 @ t_op1 |= f_op1 & ~(flat & mask)
+ vbif d20, d24, d16 @ t_op0 |= f_op0 & ~(flat & mask)
+ vbif d21, d23, d16 @ t_oq0 |= f_oq0 & ~(flat & mask)
+ vbif d22, d26, d16 @ t_oq1 |= f_oq1 & ~(flat & mask)
+
+ vbit d23, d27, d16 @ t_oq2 |= r_oq2 & (flat & mask)
+ vbif d23, d10, d16 @ t_oq2 |= q2 & ~(flat & mask)
+
+ tst r7, #2
+ bxne lr
+
+ @ wide_mbfilter flat2 && flat && mask branch
+ vmov.u8 d16, #7
+ vaddl.u8 q15, d7, d8 @ op6 = p0 + q0
+ vaddl.u8 q12, d2, d3
+ vaddl.u8 q13, d4, d5
+ vaddl.u8 q14, d1, d6
+ vmlal.u8 q15, d0, d16 @ op6 += p7 * 3
+ vadd.i16 q12, q13
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d2, d9
+ vadd.i16 q15, q12
+ vaddl.u8 q12, d0, d1
+ vaddw.u8 q15, d1
+ vaddl.u8 q13, d0, d2
+ vadd.i16 q14, q15, q14
+ vqrshrn.u16 d16, q15, #4 @ w_op6
+
+ vsub.i16 q15, q14, q12
+ vaddl.u8 q14, d3, d10
+ vqrshrn.u16 d24, q15, #4 @ w_op5
+
+ vsub.i16 q15, q13
+ vaddl.u8 q13, d0, d3
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d4, d11
+ vqrshrn.u16 d25, q15, #4 @ w_op4
+
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d0, d4
+ vsub.i16 q15, q13
+ vsub.i16 q14, q15, q14
+ vqrshrn.u16 d26, q15, #4 @ w_op3
+
+ vaddw.u8 q15, q14, d5 @ op2 += p2
+ vaddl.u8 q14, d0, d5
+ vaddw.u8 q15, d12 @ op2 += q4
+ vbif d26, d4, d17 @ op3 |= p3 & ~(f2 & f & m)
+ vqrshrn.u16 d27, q15, #4 @ w_op2
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d0, d6
+ vaddw.u8 q15, d6 @ op1 += p1
+ vaddw.u8 q15, d13 @ op1 += q5
+ vbif d27, d18, d17 @ op2 |= t_op2 & ~(f2 & f & m)
+ vqrshrn.u16 d18, q15, #4 @ w_op1
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d0, d7
+ vaddw.u8 q15, d7 @ op0 += p0
+ vaddw.u8 q15, d14 @ op0 += q6
+ vbif d18, d19, d17 @ op1 |= t_op1 & ~(f2 & f & m)
+ vqrshrn.u16 d19, q15, #4 @ w_op0
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d1, d8
+ vaddw.u8 q15, d8 @ oq0 += q0
+ vaddw.u8 q15, d15 @ oq0 += q7
+ vbif d19, d20, d17 @ op0 |= t_op0 & ~(f2 & f & m)
+ vqrshrn.u16 d20, q15, #4 @ w_oq0
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d2, d9
+ vaddw.u8 q15, d9 @ oq1 += q1
+ vaddl.u8 q4, d10, d15
+ vaddw.u8 q15, d15 @ oq1 += q7
+ vbif d20, d21, d17 @ oq0 |= t_oq0 & ~(f2 & f & m)
+ vqrshrn.u16 d21, q15, #4 @ w_oq1
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d3, d10
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d11, d15
+ vbif d21, d22, d17 @ oq1 |= t_oq1 & ~(f2 & f & m)
+ vqrshrn.u16 d22, q15, #4 @ w_oq2
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d4, d11
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d12, d15
+ vbif d22, d23, d17 @ oq2 |= t_oq2 & ~(f2 & f & m)
+ vqrshrn.u16 d23, q15, #4 @ w_oq3
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d5, d12
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d13, d15
+ vbif d16, d1, d17 @ op6 |= p6 & ~(f2 & f & m)
+ vqrshrn.u16 d1, q15, #4 @ w_oq4
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d6, d13
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d14, d15
+ vbif d24, d2, d17 @ op5 |= p5 & ~(f2 & f & m)
+ vqrshrn.u16 d2, q15, #4 @ w_oq5
+
+ vsub.i16 q15, q14
+ vbif d25, d3, d17 @ op4 |= p4 & ~(f2 & f & m)
+ vadd.i16 q15, q4
+ vbif d23, d11, d17 @ oq3 |= q3 & ~(f2 & f & m)
+ vqrshrn.u16 d3, q15, #4 @ w_oq6
+ vbif d1, d12, d17 @ oq4 |= q4 & ~(f2 & f & m)
+ vbif d2, d13, d17 @ oq5 |= q5 & ~(f2 & f & m)
+ vbif d3, d14, d17 @ oq6 |= q6 & ~(f2 & f & m)
+
+ bx lr
+ .size vp9_wide_mbfilter_neon, .-vp9_wide_mbfilter_neon @ ENDP @ |vp9_wide_mbfilter_neon|
+
+ .section .note.GNU-stack,"",%progbits
diff --git a/rsDefines.h b/rsDefines.h
index fbc63cd..e25cf71 100644
--- a/rsDefines.h
+++ b/rsDefines.h
@@ -365,7 +365,8 @@
RS_SCRIPT_INTRINSIC_ID_BLEND = 7,
RS_SCRIPT_INTRINSIC_ID_3DLUT = 8,
RS_SCRIPT_INTRINSIC_ID_HISTOGRAM = 9,
- RS_SCRIPT_INTRINSIC_ID_INTER_PRED= 10
+ RS_SCRIPT_INTRINSIC_ID_INTER_PRED= 10,
+ RS_SCRIPT_INTRINSIC_ID_LOOP_FILTER = 11
};
typedef struct {