Move RS runtime to frameworks/rs.

Bug: 7342767
Change-Id: Ia45064a5257b8ce460918f327670e3be550d4b56
diff --git a/driver/Android.mk b/driver/Android.mk
new file mode 100644
index 0000000..271de29
--- /dev/null
+++ b/driver/Android.mk
@@ -0,0 +1,4 @@
+
+LOCAL_PATH:=$(call my-dir)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/driver/runtime/Android.mk b/driver/runtime/Android.mk
new file mode 100755
index 0000000..99d685c
--- /dev/null
+++ b/driver/runtime/Android.mk
@@ -0,0 +1,117 @@
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+# C/LLVM-IR source files for the library
+clcore_base_files := \
+    rs_allocation.c \
+    rs_cl.c \
+    rs_core.c \
+    rs_element.c \
+    rs_mesh.c \
+    rs_matrix.c \
+    rs_program.c \
+    rs_sample.c \
+    rs_sampler.c \
+    convert.ll \
+    rsClamp.ll
+
+clcore_files := \
+    $(clcore_base_files) \
+    math.ll \
+    arch/generic.c \
+    arch/sqrt.c \
+    arch/dot_length.c
+
+clcore_neon_files := \
+    $(clcore_base_files) \
+    math.ll \
+    arch/neon.ll \
+    arch/sqrt.c \
+    arch/dot_length.c \
+    arch/clamp.c
+
+ifeq ($(ARCH_X86_HAVE_SSE2), true)
+    clcore_x86_files := \
+    $(clcore_base_files) \
+    arch/x86_generic.c \
+    arch/x86_clamp.ll \
+    arch/x86_math.ll
+
+    ifeq ($(ARCH_X86_HAVE_SSE3), true)
+        clcore_x86_files += arch/x86_dot_length.ll
+    else
+        # FIXME: even without SSE3 we can still generate better code via PSHUFD,
+        # but so far no device ships with SSE2 only.
+        clcore_x86_files += arch/dot_length.c
+    endif
+endif
+
+ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
+  RS_VERSION := $(PLATFORM_SDK_VERSION)
+else
+  # Increment by 1 whenever this is not a final release build, since we want to
+  # be able to see the RS version number change during development.
+  # See build/core/version_defaults.mk for more information about this.
+  RS_VERSION := "(1 + $(PLATFORM_SDK_VERSION))"
+endif
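+
+# For example, with PLATFORM_SDK_VERSION := 17 on a development branch
+# (codename not "REL"), RS_VERSION expands to "(1 + 17)"; on a release
+# branch it is simply 17.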
+
+# Build the base version of the library
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_SRC_FILES := $(clcore_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+
+# Build a debug version of the library
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore_debug.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+rs_debug_runtime := 1
+LOCAL_SRC_FILES := $(clcore_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+
+# Build an optimized version of the library if the device supports SSE2 or
+# above.
+ifeq ($(ARCH_X86_HAVE_SSE2),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore_x86.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_SRC_FILES := $(clcore_x86_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+endif
+
+# Build a NEON-enabled version of the library (if possible)
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
+# Disable NEON on cortex-a15 temporarily
+ifneq ($(strip $(TARGET_CPU_VARIANT)), cortex-a15)
+  include $(CLEAR_VARS)
+  LOCAL_MODULE := libclcore_neon.bc
+  LOCAL_MODULE_TAGS := optional
+  LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+  LOCAL_SRC_FILES := $(clcore_neon_files)
+  LOCAL_CFLAGS += -DARCH_ARM_HAVE_NEON
+
+  include $(LOCAL_PATH)/build_bc_lib.mk
+endif
+endif
diff --git a/driver/runtime/arch/clamp.c b/driver/runtime/arch/clamp.c
new file mode 100644
index 0000000..c2c2226
--- /dev/null
+++ b/driver/runtime/arch/clamp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rs_types.rsh"
+
+#define S_CLAMP(T) \
+extern T __attribute__((overloadable)) clamp(T amount, T low, T high) {             \
+    return amount < low ? low : (amount > high ? high : amount);                    \
+}
+
+//S_CLAMP(float);  implemented in .ll
+S_CLAMP(double);
+S_CLAMP(char);
+S_CLAMP(uchar);
+S_CLAMP(short);
+S_CLAMP(ushort);
+S_CLAMP(int);
+S_CLAMP(uint);
+S_CLAMP(long);
+S_CLAMP(ulong);
+
+#undef S_CLAMP
+
+
+#define V_CLAMP(T) \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T##2 low, T##2 high) { \
+    T##2 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T##3 low, T##3 high) { \
+    T##3 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T##4 low, T##4 high) { \
+    T##4 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    r.w = amount.w < low.w ? low.w : (amount.w > high.w ? high.w : amount.w);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T low, T high) {       \
+    T##2 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T low, T high) {       \
+    T##3 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T low, T high) {       \
+    T##4 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    r.w = amount.w < low ? low : (amount.w > high ? high : amount.w);               \
+    return r;                                                                       \
+}
+
+//V_CLAMP(float);  implemented in .ll
+V_CLAMP(double);
+V_CLAMP(char);
+V_CLAMP(uchar);
+V_CLAMP(short);
+V_CLAMP(ushort);
+#ifndef ARCH_ARM_HAVE_NEON
+    V_CLAMP(int);   // with NEON, these come from arch/neon.ll
+    V_CLAMP(uint);  // with NEON, these come from arch/neon.ll
+#endif
+V_CLAMP(long);
+V_CLAMP(ulong);
+
+#undef V_CLAMP
+
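+/*
+ * Example (illustrative only): the generated overloads cover both the
+ * per-component and the vector-with-scalar-limit forms, e.g.
+ *
+ *   int2 a  = {5, -3};
+ *   int2 lo = {0, -9};
+ *   int2 hi = {9,  0};
+ *   int2 b  = clamp(a, -1, 1);    // b == {1, -1}
+ *   int2 c  = clamp(a, lo, hi);   // c == {5, -3}
+ */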
diff --git a/driver/runtime/arch/dot_length.c b/driver/runtime/arch/dot_length.c
new file mode 100644
index 0000000..94c99b6
--- /dev/null
+++ b/driver/runtime/arch/dot_length.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rs_types.rsh"
+
+extern float __attribute__((overloadable)) dot(float lhs, float rhs) {
+    return lhs * rhs;
+}
+extern float __attribute__((overloadable)) dot(float2 lhs, float2 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y;
+}
+extern float __attribute__((overloadable)) dot(float3 lhs, float3 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y + lhs.z*rhs.z;
+}
+extern float __attribute__((overloadable)) dot(float4 lhs, float4 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y + lhs.z*rhs.z + lhs.w*rhs.w;
+}
+
+extern float __attribute__((overloadable)) fabs(float);
+extern float __attribute__((overloadable)) sqrt(float);
+
+extern float __attribute__((overloadable)) length(float v) {
+    return fabs(v);
+}
+extern float __attribute__((overloadable)) length(float2 v) {
+    return sqrt(v.x*v.x + v.y*v.y);
+}
+extern float __attribute__((overloadable)) length(float3 v) {
+    return sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float __attribute__((overloadable)) length(float4 v) {
+    return sqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
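+/*
+ * Note: for every width, length(v) is just sqrt(dot(v, v)) with the dot
+ * product expanded inline. Illustrative check:
+ *
+ *   float3 v = {1.f, 2.f, 2.f};
+ *   float l = length(v);          // sqrt(1 + 4 + 4) == 3.f
+ */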
diff --git a/driver/runtime/arch/generic.c b/driver/runtime/arch/generic.c
new file mode 100644
index 0000000..da83c2a
--- /dev/null
+++ b/driver/runtime/arch/generic.c
@@ -0,0 +1,948 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
+extern short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(short4);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(float4);
+extern float4 __attribute__((overloadable)) convert_float4(uchar4);
+extern float __attribute__((overloadable)) sqrt(float);
+
+/*
+ * CLAMP
+ */
+#define _CLAMP(T) \
+extern T __attribute__((overloadable)) clamp(T amount, T low, T high) {             \
+    return amount < low ? low : (amount > high ? high : amount);                    \
+}                                                                                   \
+                                                                                    \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T##2 low, T##2 high) { \
+    T##2 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T##3 low, T##3 high) { \
+    T##3 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T##4 low, T##4 high) { \
+    T##4 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    r.w = amount.w < low.w ? low.w : (amount.w > high.w ? high.w : amount.w);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T low, T high) {       \
+    T##2 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T low, T high) {       \
+    T##3 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T low, T high) {       \
+    T##4 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    r.w = amount.w < low ? low : (amount.w > high ? high : amount.w);               \
+    return r;                                                                       \
+}
+
+_CLAMP(float);
+_CLAMP(double);
+_CLAMP(char);
+_CLAMP(uchar);
+_CLAMP(short);
+_CLAMP(ushort);
+_CLAMP(int);
+_CLAMP(uint);
+_CLAMP(long);
+_CLAMP(ulong);
+
+#undef _CLAMP
+
+/*
+ * FMAX
+ */
+
+extern float __attribute__((overloadable)) fmax(float v1, float v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    r.w = v1.w > v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * FMIN
+ */
+
+extern float __attribute__((overloadable)) fmin(float v1, float v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    r.w = v1.w < v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * MAX
+ */
+
+extern char __attribute__((overloadable)) max(char v1, char v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) max(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) max(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) max(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern short __attribute__((overloadable)) max(short v1, short v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) max(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) max(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) max(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int __attribute__((overloadable)) max(int v1, int v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) max(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) max(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) max(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) max(int64_t v1, int64_t v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) max(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) max(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) max(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) max(uchar v1, uchar v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) max(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) max(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) max(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) max(ushort v1, ushort v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) max(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) max(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) max(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) max(uint v1, uint v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) max(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) max(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) max(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) max(ulong v1, ulong v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) max(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) max(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) max(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) max(float v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float2 v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float3 v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float4 v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+
+/*
+ * MIN
+ */
+
+extern int8_t __attribute__((overloadable)) min(int8_t v1, int8_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) min(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) min(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) min(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int16_t __attribute__((overloadable)) min(int16_t v1, int16_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) min(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) min(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) min(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int32_t __attribute__((overloadable)) min(int32_t v1, int32_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) min(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) min(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) min(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) min(int64_t v1, int64_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) min(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) min(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) min(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) min(uchar v1, uchar v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) min(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) min(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) min(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) min(ushort v1, ushort v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) min(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) min(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) min(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) min(uint v1, uint v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) min(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) min(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) min(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) min(ulong v1, ulong v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) min(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) min(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) min(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) min(float v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float2 v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float3 v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float4 v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+/*
+ * YUV
+ */
+
+extern uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v) {
+    short Y = ((short)y) - 16;
+    short U = ((short)u) - 128;
+    short V = ((short)v) - 128;
+
+    short4 p;
+    p.r = (Y * 298 + V * 409 + 128) >> 8;
+    p.g = (Y * 298 - U * 100 - V * 208 + 128) >> 8;
+    p.b = (Y * 298 + U * 516 + 128) >> 8;
+    p.a = 255;
+    p.r = rsClamp(p.r, (short)0, (short)255);
+    p.g = rsClamp(p.g, (short)0, (short)255);
+    p.b = rsClamp(p.b, (short)0, (short)255);
+
+    return convert_uchar4(p);
+}
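+
+// The integer coefficients above are the usual BT.601 video-range
+// YCbCr->RGB factors scaled by 256 (1.164*256 ~= 298, 1.596*256 ~= 409,
+// 0.391*256 ~= 100, 0.813*256 ~= 208, 2.018*256 ~= 516); the +128 bias
+// makes the final >> 8 round to nearest instead of truncating.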
+
+static float4 yuv_U_values = {0.f, -0.392f * 0.003921569f, +2.02f * 0.003921569f, 0.f};
+static float4 yuv_V_values = {1.603f * 0.003921569f, -0.815f * 0.003921569f, 0.f, 0.f};
+
+extern float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v) {
+    float4 color = (float)y * 0.003921569f;
+    float4 fU = ((float)u) - 128.f;
+    float4 fV = ((float)v) - 128.f;
+
+    color += fU * yuv_U_values;
+    color += fV * yuv_V_values;
+    color = clamp(color, 0.f, 1.f);
+    return color;
+}
+
+
+/*
+ * half_RECIP
+ */
+
+extern float __attribute__((overloadable)) half_recip(float v) {
+    // FIXME: add a real approximate-reciprocal algorithm for the generic path
+    return 1.f / v;
+}
+
+extern float2 __attribute__((overloadable)) half_recip(float2 v) {
+    float2 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_recip(float3 v) {
+    float3 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_recip(float4 v) {
+    float4 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    r.w = half_recip(v.w);
+    return r;
+}
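+
+// A common faster approximation (a sketch, not what this generic path does)
+// is one Newton-Raphson step on a rough estimate e of 1/v:
+//
+//     e' = e * (2.f - v * e);
+//
+// which roughly doubles the number of correct bits per iteration.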
+
+
+/*
+ * half_SQRT
+ */
+
+extern float __attribute__((overloadable)) half_sqrt(float v) {
+    return sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_sqrt(float2 v) {
+    float2 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_sqrt(float3 v) {
+    float3 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_sqrt(float4 v) {
+    float4 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    r.w = half_sqrt(v.w);
+    return r;
+}
+
+
+/*
+ * half_rsqrt
+ */
+
+extern float __attribute__((overloadable)) half_rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_rsqrt(float2 v) {
+    float2 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_rsqrt(float3 v) {
+    float3 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_rsqrt(float4 v) {
+    float4 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    r.w = half_rsqrt(v.w);
+    return r;
+}
+
+/**
+ * matrix ops
+ */
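+
+// The m->m[] indexing below assumes column-major storage: element (row r,
+// column c) lives at m->m[c*4 + r] (c*3 + r for 3x3), so e.g. ret.x sums
+// row 0 of the matrix against the input vector.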
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float4 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + (m->m[8] * in.z) + (m->m[12] * in.w);
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + (m->m[9] * in.z) + (m->m[13] * in.w);
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + (m->m[10] * in.z) + (m->m[14] * in.w);
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + (m->m[11] * in.z) + (m->m[15] * in.w);
+    return ret;
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float3 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + (m->m[8] * in.z) + m->m[12];
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + (m->m[9] * in.z) + m->m[13];
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + (m->m[10] * in.z) + m->m[14];
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + (m->m[11] * in.z) + m->m[15];
+    return ret;
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float2 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + m->m[12];
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + m->m[13];
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + m->m[14];
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + m->m[15];
+    return ret;
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix3x3 *m, float3 in) {
+    float3 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[3] * in.y) + (m->m[6] * in.z);
+    ret.y = (m->m[1] * in.x) + (m->m[4] * in.y) + (m->m[7] * in.z);
+    ret.z = (m->m[2] * in.x) + (m->m[5] * in.y) + (m->m[8] * in.z);
+    return ret;
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix3x3 *m, float2 in) {
+    float3 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[3] * in.y);
+    ret.y = (m->m[1] * in.x) + (m->m[4] * in.y);
+    ret.z = (m->m[2] * in.x) + (m->m[5] * in.y);
+    return ret;
+}
+
+/**
+ * Pixel Ops
+ */
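+
+// Note: "* 255.f + 0.5f" below scales [0,1] to [0,255] and rounds to
+// nearest; the clamp then bounds out-of-range inputs before the narrowing
+// uchar conversion.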
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+{
+    uchar4 c;
+    c.x = (uchar)clamp((r * 255.f + 0.5f), 0.f, 255.f);
+    c.y = (uchar)clamp((g * 255.f + 0.5f), 0.f, 255.f);
+    c.z = (uchar)clamp((b * 255.f + 0.5f), 0.f, 255.f);
+    c.w = 255;
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+{
+    uchar4 c;
+    c.x = (uchar)clamp((r * 255.f + 0.5f), 0.f, 255.f);
+    c.y = (uchar)clamp((g * 255.f + 0.5f), 0.f, 255.f);
+    c.z = (uchar)clamp((b * 255.f + 0.5f), 0.f, 255.f);
+    c.w = (uchar)clamp((a * 255.f + 0.5f), 0.f, 255.f);
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    color = clamp(color, 0.f, 255.f);
+    uchar4 c = {color.x, color.y, color.z, 255};
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    color = clamp(color, 0.f, 255.f);
+    uchar4 c = {color.x, color.y, color.z, color.w};
+    return c;
+}
+
diff --git a/driver/runtime/arch/neon.ll b/driver/runtime/arch/neon.ll
new file mode 100644
index 0000000..4a1172b
--- /dev/null
+++ b/driver/runtime/arch/neon.ll
@@ -0,0 +1,1172 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
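+; Note: the function names below are the Itanium-mangled RenderScript
+; overloads, e.g. _Z5clampDv4_fS_S_ is clamp(float4, float4, float4) and
+; _Z4fmaxDv4_ff is fmax(float4, float); on NEON builds these hand-written
+; kernels stand in for the generic C implementations.
+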
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;               INTRINSICS               ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                HELPERS                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define internal <4 x float> @smear_4f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+define internal <4 x i32> @smear_4i(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <4 x i32> %1, i32 %in, i32 1
+  %3 = insertelement <4 x i32> %2, i32 %in, i32 2
+  %4 = insertelement <4 x i32> %3, i32 %in, i32 3
+  ret <4 x i32> %4
+}
+
+define internal <4 x i16> @smear_4s(i16 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i16> undef, i16 %in, i32 0
+  %2 = insertelement <4 x i16> %1, i16 %in, i32 1
+  %3 = insertelement <4 x i16> %2, i16 %in, i32 2
+  %4 = insertelement <4 x i16> %3, i16 %in, i32 3
+  ret <4 x i16> %4
+}
+
+
+
+define internal <2 x float> @smear_2f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x float> undef, float %in, i32 0
+  %2 = insertelement <2 x float> %1, float %in, i32 1
+  ret <2 x float> %2
+}
+
+define internal <2 x i32> @smear_2i(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <2 x i32> %1, i32 %in, i32 1
+  ret <2 x i32> %2
+}
+
+define internal <2 x i16> @smear_2s(i16 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x i16> undef, i16 %in, i32 0
+  %2 = insertelement <2 x i16> %1, i16 %in, i32 1
+  ret <2 x i16> %2
+}
+
+
+define internal <4 x i32> @smear_4i32(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <4 x i32> %1, i32 %in, i32 1
+  %3 = insertelement <4 x i32> %2, i32 %in, i32 2
+  %4 = insertelement <4 x i32> %3, i32 %in, i32 3
+  ret <4 x i32> %4
+}
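+
+; The smear_* helpers above broadcast one scalar into every lane. 3-element
+; vectors have no native NEON register width, so the code below widens them
+; to 4 lanes with shufflevector, runs the 128-bit q-register intrinsic, and
+; shuffles the result back down to 3 lanes.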
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                 CLAMP                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %low, <4 x float> %high) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %value, <4 x float> %high) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %low) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <4 x float> @_Z5clampDv4_fff(<4 x float> %value, float %low, float %high) nounwind readonly {
+  %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
+  %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
+  %out = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %_low, <4 x float> %_high) nounwind readonly
+  ret <4 x float> %out
+}
+
+define <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %value, <3 x float> %low, <3 x float> %high) nounwind readonly {
+  %_value = shufflevector <3 x float> %value, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x float> %low, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x float> %high, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
+  %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
+  %c = shufflevector <4 x float> %b, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <3 x float> @_Z5clampDv3_fff(<3 x float> %value, float %low, float %high) nounwind readonly {
+  %_value = shufflevector <3 x float> %value, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
+  %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
+  %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
+  %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
+  %c = shufflevector <4 x float> %b, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %value, <2 x float> %low, <2 x float> %high) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %value, <2 x float> %high) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %1, <2 x float> %low) nounwind readnone
+  ret <2 x float> %2
+}
+
+define <2 x float> @_Z5clampDv2_fff(<2 x float> %value, float %low, float %high) nounwind readonly {
+  %_high = tail call <2 x float> @smear_2f(float %high) nounwind readnone
+  %_low = tail call <2 x float> @smear_2f(float %low) nounwind readnone
+  %a = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %value, <2 x float> %_high) nounwind readnone
+  %b = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %a, <2 x float> %_low) nounwind readnone
+  ret <2 x float> %b
+}
+
+define float @_Z5clampfff(float %value, float %low, float %high) nounwind readonly {
+  %1 = fcmp olt float %value, %high
+  %2 = select i1 %1, float %value, float %high
+  %3 = fcmp ogt float %2, %low
+  %4 = select i1 %3, float %2, float %low
+  ret float %4
+}
+
+
+
+define <4 x i32> @_Z5clampDv4_iS_S_(<4 x i32> %value, <4 x i32> %low, <4 x i32> %high) nounwind readonly {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %value, <4 x i32> %high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @_Z5clampDv4_iii(<4 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %value, <4 x i32> %_high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %_low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <3 x i32> @_Z5clampDv3_iS_S_(<3 x i32> %value, <3 x i32> %low, <3 x i32> %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x i32> %low, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x i32> %high, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <3 x i32> @_Z5clampDv3_iii(<3 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %a = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <2 x i32> @_Z5clampDv2_iS_S_(<2 x i32> %value, <2 x i32> %low, <2 x i32> %high) nounwind readonly {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %value, <2 x i32> %high) nounwind readnone
+  %2 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %low) nounwind readnone
+  ret <2 x i32> %2
+}
+
+define <2 x i32> @_Z5clampDv2_iii(<2 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <2 x i32> @smear_2i(i32 %high) nounwind readnone
+  %_low = tail call <2 x i32> @smear_2i(i32 %low) nounwind readnone
+  %a = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %value, <2 x i32> %_high) nounwind readnone
+  %b = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %a, <2 x i32> %_low) nounwind readnone
+  ret <2 x i32> %b
+}
+
+
+
+define <4 x i32> @_Z5clampDv4_jS_S_(<4 x i32> %value, <4 x i32> %low, <4 x i32> %high) nounwind readonly {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %value, <4 x i32> %high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @_Z5clampDv4_jjj(<4 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %value, <4 x i32> %_high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %_low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <3 x i32> @_Z5clampDv3_jS_S_(<3 x i32> %value, <3 x i32> %low, <3 x i32> %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x i32> %low, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x i32> %high, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <3 x i32> @_Z5clampDv3_jjj(<3 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %a = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <2 x i32> @_Z5clampDv2_jS_S_(<2 x i32> %value, <2 x i32> %low, <2 x i32> %high) nounwind readonly {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %value, <2 x i32> %high) nounwind readnone
+  %2 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %low) nounwind readnone
+  ret <2 x i32> %2
+}
+
+define <2 x i32> @_Z5clampDv2_jjj(<2 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <2 x i32> @smear_2i(i32 %high) nounwind readnone
+  %_low = tail call <2 x i32> @smear_2i(i32 %low) nounwind readnone
+  %a = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %value, <2 x i32> %_high) nounwind readnone
+  %b = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %a, <2 x i32> %_low) nounwind readnone
+  ret <2 x i32> %b
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FMAX                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x float> @_Z4fmaxDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %v1, <4 x float> %v2) nounwind readnone
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z4fmaxDv4_ff(<4 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %v1, <4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z4fmaxDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %v2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <3 x float> @_Z4fmaxDv3_ff(<3 x float> %v1, float %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %c = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z4fmaxDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %v1, <2 x float> %v2) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z4fmaxDv2_ff(<2 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <2 x float> @smear_2f(float %v2) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %v1, <2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define float @_Z4fmaxff(float %v1, float %v2) nounwind readnone {
+  %1 = fcmp ogt float %v1, %v2
+  %2 = select i1 %1, float %v1, float %v2
+  ret float %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FMIN                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
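+; fmin mirrors fmax exactly, substituting the NEON vmin intrinsics and an
+; olt compare in the scalar case.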
+define <4 x float> @_Z4fminDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %v1, <4 x float> %v2) nounwind readnone
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z4fminDv4_ff(<4 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %v1, <4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z4fminDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %v2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <3 x float> @_Z4fminDv3_ff(<3 x float> %v1, float %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %c = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z4fminDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %v1, <2 x float> %v2) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z4fminDv2_ff(<2 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <2 x float> @smear_2f(float %v2) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %v1, <2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define float @_Z4fminff(float %v1, float %v2) nounwind readnone {
+  %1 = fcmp olt float %v1, %v2
+  %2 = select i1 %1, float %v1, float %v2
+  ret float %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  MAX                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
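+; Integer max: scalars use icmp/select; i8 and i16 vectors are sign- or
+; zero-extended to i32, run through vmaxs/vmaxu, then truncated back, and
+; 3-vectors are padded to four lanes. NEON also has native 8- and 16-bit
+; vmax operations, so the widening here presumably trades a few extra
+; extend/narrow instructions for simpler IR.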
+define signext i8 @_Z3maxcc(i8 signext %v1, i8 signext %v2) nounwind readnone {
+  %1 = icmp sgt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3maxDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = sext <2 x i8> %v1 to <2 x i32>
+  %2 = sext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3maxDv3_cS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = sext <3 x i8> %v1 to <3 x i32>
+  %2 = sext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3maxDv4_cS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = sext <4 x i8> %v1 to <4 x i32>
+  %2 = sext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define signext i16 @_Z3maxss(i16 signext %v1, i16 signext %v2) nounwind readnone {
+  %1 = icmp sgt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3maxDv2_sS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = sext <2 x i16> %v1 to <2 x i32>
+  %2 = sext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3maxDv3_sS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = sext <3 x i16> %v1 to <3 x i32>
+  %2 = sext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3maxDv4_sS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = sext <4 x i16> %v1 to <4 x i32>
+  %2 = sext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3maxii(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp sgt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3maxDv2_iS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3maxDv3_iS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3maxDv4_iS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3maxxx(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp sgt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define zeroext i8 @_Z3maxhh(i8 zeroext %v1, i8 zeroext %v2) nounwind readnone {
+  %1 = icmp ugt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3maxDv2_hS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = zext <2 x i8> %v1 to <2 x i32>
+  %2 = zext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3maxDv3_hS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = zext <3 x i8> %v1 to <3 x i32>
+  %2 = zext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3maxDv4_hS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = zext <4 x i8> %v1 to <4 x i32>
+  %2 = zext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define zeroext i16 @_Z3maxtt(i16 zeroext %v1, i16 zeroext %v2) nounwind readnone {
+  %1 = icmp ugt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3maxDv2_tS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = zext <2 x i16> %v1 to <2 x i32>
+  %2 = zext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3maxDv3_tS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = zext <3 x i16> %v1 to <3 x i32>
+  %2 = zext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3maxDv4_tS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = zext <4 x i16> %v1 to <4 x i32>
+  %2 = zext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3maxjj(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp ugt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3maxDv2_jS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3maxDv3_jS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3maxDv4_jS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3maxyy(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp ugt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define float @_Z3maxff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @_Z4fmaxff(float %v1, float %v2)
+  ret float %1
+}
+
+define <2 x float> @_Z3maxDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fmaxDv2_fS_(<2 x float> %v1, <2 x float> %v2)
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z3maxDv2_ff(<2 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fmaxDv2_ff(<2 x float> %v1, float %v2)
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z3maxDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fmaxDv3_fS_(<3 x float> %v1, <3 x float> %v2)
+  ret <3 x float> %1
+}
+
+define <3 x float> @_Z3maxDv3_ff(<3 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fmaxDv3_ff(<3 x float> %v1, float %v2)
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z3maxDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fmaxDv4_fS_(<4 x float> %v1, <4 x float> %v2)
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z3maxDv4_ff(<4 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fmaxDv4_ff(<4 x float> %v1, float %v2)
+  ret <4 x float> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  MIN                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
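+; min mirrors max with slt/ult compares and the NEON vmin intrinsics.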
+define signext i8 @_Z3mincc(i8 signext %v1, i8 signext %v2) nounwind readnone {
+  %1 = icmp slt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3minDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = sext <2 x i8> %v1 to <2 x i32>
+  %2 = sext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3minDv3_cS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = sext <3 x i8> %v1 to <3 x i32>
+  %2 = sext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3minDv4_cS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = sext <4 x i8> %v1 to <4 x i32>
+  %2 = sext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define signext i16 @_Z3minss(i16 signext %v1, i16 signext %v2) nounwind readnone {
+  %1 = icmp slt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3minDv2_sS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = sext <2 x i16> %v1 to <2 x i32>
+  %2 = sext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3minDv3_sS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = sext <3 x i16> %v1 to <3 x i32>
+  %2 = sext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3minDv4_sS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = sext <4 x i16> %v1 to <4 x i32>
+  %2 = sext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3minii(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp slt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3minDv2_iS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3minDv3_iS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3minDv4_iS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3minxx(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp slt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define zeroext i8 @_Z3minhh(i8 zeroext %v1, i8 zeroext %v2) nounwind readnone {
+  %1 = icmp ult i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3minDv2_hS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = zext <2 x i8> %v1 to <2 x i32>
+  %2 = zext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3minDv3_hS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = zext <3 x i8> %v1 to <3 x i32>
+  %2 = zext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3minDv4_hS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = zext <4 x i8> %v1 to <4 x i32>
+  %2 = zext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define zeroext i16 @_Z3mintt(i16 zeroext %v1, i16 zeroext %v2) nounwind readnone {
+  %1 = icmp ult i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3minDv2_tS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = zext <2 x i16> %v1 to <2 x i32>
+  %2 = zext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3minDv3_tS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = zext <3 x i16> %v1 to <3 x i32>
+  %2 = zext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3minDv4_tS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = zext <4 x i16> %v1 to <4 x i32>
+  %2 = zext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3minjj(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp ult i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3minDv2_jS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3minDv3_jS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3minDv4_jS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3minyy(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp ult i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define float @_Z3minff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @_Z4fminff(float %v1, float %v2)
+  ret float %1
+}
+
+define <2 x float> @_Z3minDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fminDv2_fS_(<2 x float> %v1, <2 x float> %v2)
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z3minDv2_ff(<2 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fminDv2_ff(<2 x float> %v1, float %v2)
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z3minDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fminDv3_fS_(<3 x float> %v1, <3 x float> %v2)
+  ret <3 x float> %1
+}
+
+define <3 x float> @_Z3minDv3_ff(<3 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fminDv3_ff(<3 x float> %v1, float %v2)
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z3minDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fminDv4_fS_(<4 x float> %v1, <4 x float> %v2)
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z3minDv4_ff(<4 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fminDv4_ff(<4 x float> %v1, float %v2)
+  ret <4 x float> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  YUV                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+@yuv_U = internal constant <4 x i32> <i32 0, i32 -100, i32 516, i32 0>, align 16
+@yuv_V = internal constant <4 x i32> <i32 409, i32 -208, i32 0, i32 0>, align 16
+@yuv_0 = internal constant <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+@yuv_255 = internal constant <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>, align 16
+
+
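+; Fixed-point BT.601 YUV -> RGBA with an 8-bit scale factor:
+;   R = (298(Y-16)              + 409(V-128)) >> 8
+;   G = (298(Y-16) - 100(U-128) - 208(V-128)) >> 8
+;   B = (298(Y-16) + 516(U-128)             ) >> 8
+; The U/V coefficients are laid out per channel in @yuv_U/@yuv_V so all four
+; lanes are computed with two vector multiply-adds, clamped to [0, 65535],
+; and shifted down by 8 (truncating, not rounding: nominal white Y=235,
+; U=V=128 yields (298*219)>>8 = 254 per channel). Lane 3 carries only the Y
+; term, so the caller presumably fixes up the alpha channel.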
+define <4 x i8> @_Z18rsYuvToRGBA_uchar4hhh(i8 %pY, i8 %pU, i8 %pV) nounwind readnone alwaysinline {
+  %_sy = zext i8 %pY to i32
+  %_su = zext i8 %pU to i32
+  %_sv = zext i8 %pV to i32
+
+  %_sy2 = add i32 -16, %_sy
+  %_sy3 = mul i32 298, %_sy2
+  %_su2 = add i32 -128, %_su
+  %_sv2 = add i32 -128, %_sv
+  %_y = tail call <4 x i32> @smear_4i32(i32 %_sy3) nounwind readnone
+  %_u = tail call <4 x i32> @smear_4i32(i32 %_su2) nounwind readnone
+  %_v = tail call <4 x i32> @smear_4i32(i32 %_sv2) nounwind readnone
+
+  %mu = load <4 x i32>* @yuv_U, align 8
+  %mv = load <4 x i32>* @yuv_V, align 8
+  %_u2 = mul <4 x i32> %_u, %mu
+  %_v2 = mul <4 x i32> %_v, %mv
+  %_y2 = add <4 x i32> %_y, %_u2
+  %_y3 = add <4 x i32> %_y2, %_v2
+
+  ; Alternative using the unsigned saturating narrowing shift, kept for
+  ; reference:
+  ;   %r1 = tail call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %_y3, <4 x i32> <i32 8, i32 8, i32 8, i32 8>) nounwind readnone
+  ;   %r2 = trunc <4 x i16> %r1 to <4 x i8>
+  ;   ret <4 x i8> %r2
+
+  %c0 = load <4 x i32>* @yuv_0, align 8
+  %c255 = load <4 x i32>* @yuv_255, align 8
+  %r1 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %_y3, <4 x i32> %c0) nounwind readnone
+  %r2 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %r1, <4 x i32> %c255) nounwind readnone
+  %r3 = lshr <4 x i32> %r2, <i32 8, i32 8, i32 8, i32 8>
+  %r4 = trunc <4 x i32> %r3 to <4 x i8>
+  ret <4 x i8> %r4
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              half_RECIP              ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
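+; half_recip uses VRECPE, the NEON reciprocal *estimate* (roughly 8 bits of
+; precision), with no Newton-Raphson refinement; that matches the relaxed
+; precision the half_ variants permit.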
+define float @_Z10half_recipf(float %v) {
+  %1 = insertelement <2 x float> undef, float %v, i32 0
+  %2 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %1) nounwind readnone
+  %3 = extractelement <2 x float> %2, i32 0
+  ret float %3
+}
+
+define <2 x float> @_Z10half_recip2Dv2_h(<2 x float> %v) nounwind readnone {
+  %1 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %v) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z10half_recip3Dv3_h(<3 x float> %v) nounwind readnone {
+  %1 = shufflevector <3 x float> %v, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %1) nounwind readnone
+  %3 = shufflevector <4 x float> %2, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %3
+}
+
+define <4 x float> @_Z10half_recip4Dv4_h(<4 x float> %v) nounwind readnone {
+  %1 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %v) nounwind readnone
+  ret <4 x float> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              half_SQRT               ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
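+; half_sqrt approximates sqrt(x) as recpe(rsqrte(x)), chaining two NEON
+; estimate instructions. Since VRSQRTE(0) is +inf and VRECPE(+inf) is 0,
+; half_sqrt(0) still comes out as 0.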
+define float @_Z9half_sqrtf(float %v) {
+  %1 = insertelement <2 x float> undef, float %v, i32 0
+  %2 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %1) nounwind readnone
+  %3 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %2) nounwind readnone
+  %4 = extractelement <2 x float> %3, i32 0
+  ret float %4
+}
+
+define <2 x float> @_Z9half_sqrt2Dv2_h(<2 x float> %v) nounwind readnone {
+  %1 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %v) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define <3 x float> @_Z9half_sqrt3Dv3_h(<3 x float> %v) nounwind readnone {
+  %1 = shufflevector <3 x float> %v, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %1) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <4 x float> @_Z9half_sqrt4Dv4_h(<4 x float> %v) nounwind readnone {
+  %1 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %v) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              half_RSQRT              ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
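+; half_rsqrt is the raw VRSQRTE estimate, again without a refinement step.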
+define float @_Z10half_rsqrtf(float %v) {
+  %1 = insertelement <2 x float> undef, float %v, i32 0
+  %2 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %1) nounwind readnone
+  %3 = extractelement <2 x float> %2, i32 0
+  ret float %3
+}
+
+define <2 x float> @_Z10half_rsqrt2Dv2_h(<2 x float> %v) nounwind readnone {
+  %1 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %v) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z10half_rsqrt3Dv3_h(<3 x float> %v) nounwind readnone {
+  %1 = shufflevector <3 x float> %v, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %1) nounwind readnone
+  %3 = shufflevector <4 x float> %2, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %3
+}
+
+define <4 x float> @_Z10half_rsqrt4Dv4_h(<4 x float> %v) nounwind readnone {
+  %1 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %v) nounwind readnone
+  ret <4 x float> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              matrix                    ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
+
+%struct.rs_matrix4x4 = type { [16 x float] }
+%struct.rs_matrix3x3 = type { [9 x float] }
+%struct.rs_matrix2x2 = type { [4 x float] }
+
+define internal <4 x float> @smear_f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+
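+; The matrix multiplies below form M * v as a linear combination of matrix
+; columns: each input component is splatted across a vector with @smear_f
+; and multiplied by four consecutive floats of the matrix (columns start at
+; floats 0/4/8/12 for 4x4 and 0/3/6 for 3x3). The Dv3_f overload of the 4x4
+; multiply treats the input as (x, y, z, 1), adding the translation column
+; unconditionally.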
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to i8*
+  %xm = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %px2, i32 4) nounwind
+
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to i8*
+  %ym = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %py2, i32 4) nounwind
+
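+  ; The third column occupies floats 6-8. A four-wide load starting there
+  ; would read one float past the end of the 9-float struct, so load from
+  ; float 5 instead and shift the result down one lane with the
+  ; shufflevector below.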
+  %pz = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 5
+  %pz2 = bitcast float* %pz to i8*
+  %zm2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %pz2, i32 4) nounwind
+  %zm = shufflevector <4 x float> %zm2, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a4, %a3
+  %a6 = shufflevector <4 x float> %a5, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a6
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = shufflevector <4 x float> %a3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a4
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %x0 = extractelement <4 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <4 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <4 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+  %w0 = extractelement <4 x float> %in, i32 3
+  %w = tail call <4 x float> @smear_f(float %w0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a3, %a4
+  %a6 = fmul <4 x float> %w, %wm
+  %a7 = fadd <4 x float> %a5, %a6
+  ret <4 x float> %a7
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  %a5 = fmul <4 x float> %z, %zm
+  %a6 = fadd <4 x float> %a4, %a5
+  ret <4 x float> %a6
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  ret <4 x float> %a4
+}
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              pixel ops                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+@fc_255.0 = internal constant <4 x float> <float 255.0, float 255.0, float 255.0, float 255.0>, align 16
+@fc_0.5 = internal constant <4 x float> <float 0.5, float 0.5, float 0.5, float 0.5>, align 16
+@fc_0 = internal constant <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, align 16
+
+declare <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %in) nounwind readnone
+declare <4 x float> @_Z14convert_float4Dv4_h(<4 x i8> %in) nounwind readnone
+
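+; rsPackColorTo8888 maps [0.0, 1.0] floats to bytes: scale by 255, add 0.5
+; to round, clamp to [0, 255], then convert to uchar4. The float3 and
+; (r, g, b) forms insert an implicit alpha of 1.0 before delegating to the
+; float4 version.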
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+define <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %color) nounwind readnone {
+    %f255 = load <4 x float>* @fc_255.0, align 16
+    %f05 = load <4 x float>* @fc_0.5, align 16
+    %f0 = load <4 x float>* @fc_0, align 16
+    %v1 = fmul <4 x float> %f255, %color
+    %v2 = fadd <4 x float> %f05, %v1
+    %v3 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %v2, <4 x float> %f0, <4 x float> %f255) nounwind readnone
+    %v4 = tail call <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %v3) nounwind readnone
+    ret <4 x i8> %v4
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+define <4 x i8> @_Z17rsPackColorTo8888Dv3_f(<3 x float> %color) nounwind readnone {
+    %1 = shufflevector <3 x float> %color, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %2 = insertelement <4 x float> %1, float 1.0, i32 3
+    %3 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %2) nounwind readnone
+    ret <4 x i8> %3
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+define <4 x i8> @_Z17rsPackColorTo8888fff(float %r, float %g, float %b) nounwind readnone {
+    %1 = insertelement <4 x float> undef, float %r, i32 0
+    %2 = insertelement <4 x float> %1, float %g, i32 1
+    %3 = insertelement <4 x float> %2, float %b, i32 2
+    %4 = insertelement <4 x float> %3, float 1.0, i32 3
+    %5 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %4) nounwind readnone
+    ret <4 x i8> %5
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+define <4 x i8> @_Z17rsPackColorTo8888ffff(float %r, float %g, float %b, float %a) nounwind readnone {
+    %1 = insertelement <4 x float> undef, float %r, i32 0
+    %2 = insertelement <4 x float> %1, float %g, i32 1
+    %3 = insertelement <4 x float> %2, float %b, i32 2
+    %4 = insertelement <4 x float> %3, float %a, i32 3
+    %5 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %4) nounwind readnone
+    ret <4 x i8> %5
+}
+
diff --git a/driver/runtime/arch/sqrt.c b/driver/runtime/arch/sqrt.c
new file mode 100755
index 0000000..f1dac5f
--- /dev/null
+++ b/driver/runtime/arch/sqrt.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
+#define FN_FUNC_FN(fnc)                                         \
+extern float2 __attribute__((overloadable)) fnc(float2 v) { \
+    float2 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern float3 __attribute__((overloadable)) fnc(float3 v) { \
+    float3 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern float4 __attribute__((overloadable)) fnc(float4 v) { \
+    float4 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+extern float __attribute__((overloadable)) sqrt(float);
+
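+/*
+ * FN_FUNC_FN(sqrt) expands the scalar sqrt declared above into float2,
+ * float3, and float4 overloads that apply sqrt to each component in turn.
+ */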
+FN_FUNC_FN(sqrt)
diff --git a/driver/runtime/arch/x86_clamp.ll b/driver/runtime/arch/x86_clamp.ll
new file mode 100755
index 0000000..422e9f6
--- /dev/null
+++ b/driver/runtime/arch/x86_clamp.ll
@@ -0,0 +1,74 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+
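+; clamp is computed as a min against the high bound followed by a max
+; against the low bound, using SSE MINPS/MAXPS (MINSS/MAXSS for the scalar
+; overload); two- and three-float inputs are padded to four lanes so every
+; overload shares the four-wide path.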
+define <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %in, <4 x float> %low, <4 x float> %high) nounwind readnone alwaysinline {
+  %1 = tail call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %in, <4 x float> %high) nounwind readnone
+  %2 = tail call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %low) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %in, <3 x float> %low, <3 x float> %high) nounwind readnone alwaysinline {
+  %1 = shufflevector <3 x float> %in, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %low, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = shufflevector <3 x float> %high, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %1, <4 x float> %2, <4 x float> %3) nounwind readnone
+  %5 = shufflevector <4 x float> %4, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %5
+}
+
+define <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %in, <2 x float> %low, <2 x float> %high) nounwind readnone alwaysinline {
+  %1 = shufflevector <2 x float> %in, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <2 x float> %low, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = shufflevector <2 x float> %high, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %1, <4 x float> %2, <4 x float> %3) nounwind readnone
+  %5 = shufflevector <4 x float> %4, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %5
+}
+
+define float @_Z5clampfff(float %in, float %low, float %high) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> undef, float %low, i32 0
+  %3 = insertelement <4 x float> undef, float %high, i32 0
+  %4 = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %3) nounwind readnone
+  %5 = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %4, <4 x float> %2) nounwind readnone
+  %6 = extractelement <4 x float> %5, i32 0
+  ret float %6
+}
+
+define <4 x float> @_Z5clampDv4_fff(<4 x float> %in, float %low, float %high) nounwind readonly {
+  %1 = insertelement <4 x float> undef, float %low, i32 0
+  %2 = insertelement <4 x float> %1, float %low, i32 1
+  %3 = insertelement <4 x float> %2, float %low, i32 2
+  %4 = insertelement <4 x float> %3, float %low, i32 3
+  %5 = insertelement <4 x float> undef, float %high, i32 0
+  %6 = insertelement <4 x float> %5, float %high, i32 1
+  %7 = insertelement <4 x float> %6, float %high, i32 2
+  %8 = insertelement <4 x float> %7, float %high, i32 3
+  %9 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %in, <4 x float> %4, <4 x float> %8) nounwind readnone
+  ret <4 x float> %9
+}
+
+define <3 x float> @_Z5clampDv3_fff(<3 x float> %in, float %low, float %high) nounwind readonly {
+  %1 = insertelement <3 x float> undef, float %low, i32 0
+  %2 = insertelement <3 x float> %1, float %low, i32 1
+  %3 = insertelement <3 x float> %2, float %low, i32 2
+  %4 = insertelement <3 x float> undef, float %high, i32 0
+  %5 = insertelement <3 x float> %4, float %high, i32 1
+  %6 = insertelement <3 x float> %5, float %high, i32 2
+  %7 = tail call <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %in, <3 x float> %3, <3 x float> %6) nounwind readnone
+  ret <3 x float> %7
+}
+
+define <2 x float> @_Z5clampDv2_fff(<2 x float> %in, float %low, float %high) nounwind readonly {
+  %1 = insertelement <2 x float> undef, float %low, i32 0
+  %2 = insertelement <2 x float> %1, float %low, i32 1
+  %3 = insertelement <2 x float> undef, float %high, i32 0
+  %4 = insertelement <2 x float> %3, float %high, i32 1
+  %5 = tail call <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %in, <2 x float> %2, <2 x float> %4) nounwind readnone
+  ret <2 x float> %5
+}
diff --git a/driver/runtime/arch/x86_dot_length.ll b/driver/runtime/arch/x86_dot_length.ll
new file mode 100644
index 0000000..21f2f3e
--- /dev/null
+++ b/driver/runtime/arch/x86_dot_length.ll
@@ -0,0 +1,75 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
+declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
+declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
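+; dot and length reduce with SSE3 horizontal adds (HADDPS). The float3
+; overloads pad to four lanes, then use PSLLDQ to shift the three products
+; up one 32-bit lane, pushing the undefined fourth lane out and pulling a
+; zero into lane 0 before the reduction.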
+define float @_Z3dotDv4_fS_(<4 x float> %lhs, <4 x float> %rhs) nounwind readnone {
+  %1 = fmul <4 x float> %lhs, %rhs
+  %2 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %1) nounwind readnone
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  ret float %4
+}
+
+define float @_Z3dotDv3_fS_(<3 x float> %lhs, <3 x float> %rhs) nounwind readnone {
+  %1 = fmul <3 x float> %lhs, %rhs
+  %2 = shufflevector <3 x float> %1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = bitcast <4 x float> %2 to <2 x i64>
+  %4 = tail call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %3, i32 32)
+  %5 = bitcast <2 x i64> %4 to <4 x float>
+  %6 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %5, <4 x float> %5) nounwind readnone
+  %7 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %6, <4 x float> %6) nounwind readnone
+  %8 = extractelement <4 x float> %7, i32 0
+  ret float %8
+}
+
+define float @_Z3dotDv2_fS_(<2 x float> %lhs, <2 x float> %rhs) nounwind readnone {
+  %1 = fmul <2 x float> %lhs, %rhs
+  %2 = shufflevector <2 x float> %1, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  ret float %4
+}
+
+define float @_Z3dotff(float %lhs, float %rhs) nounwind readnone {
+  %1 = fmul float %lhs, %rhs
+  ret float %1
+}
+
+define float @_Z6lengthDv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fmul <4 x float> %in, %in
+  %2 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %1) nounwind readnone
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  %5 = tail call float @llvm.sqrt.f32(float %4) nounwind readnone
+  ret float %5
+}
+
+define float @_Z6lengthDv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fmul <3 x float> %in, %in
+  %2 = shufflevector <3 x float> %1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = bitcast <4 x float> %2 to <2 x i64>
+  %4 = tail call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %3, i32 32)
+  %5 = bitcast <2 x i64> %4 to <4 x float>
+  %6 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %5, <4 x float> %5) nounwind readnone
+  %7 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %6, <4 x float> %6) nounwind readnone
+  %8 = extractelement <4 x float> %7, i32 0
+  %9 = tail call float @llvm.sqrt.f32(float %8) nounwind readnone
+  ret float %9
+}
+
+define float @_Z6lengthDv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fmul <2 x float> %in, %in
+  %2 = shufflevector <2 x float> %1, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  %5 = tail call float @llvm.sqrt.f32(float %4) nounwind readnone
+  ret float %5
+}
+
+define float @_Z6lengthf(float %in) nounwind readnone alwaysinline {
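+  ; Returns the input unchanged, which assumes a non-negative value; a fully
+  ; general length() of a scalar would be its absolute value.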
+  ret float %in
+}
+
diff --git a/driver/runtime/arch/x86_generic.c b/driver/runtime/arch/x86_generic.c
new file mode 100644
index 0000000..c46c54a
--- /dev/null
+++ b/driver/runtime/arch/x86_generic.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
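+/*
+ * Plain component-wise C implementations of the math builtins for x86;
+ * vectorization is left to the compiler rather than written out as
+ * intrinsics.
+ */
+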
+extern short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high);
+extern float4 __attribute__((overloadable)) clamp(float4 amount, float4 low, float4 high);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(short4);
+extern float __attribute__((overloadable)) sqrt(float);
+
+/*
+ * FMAX
+ */
+
+extern float __attribute__((overloadable)) fmax(float v1, float v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    r.w = v1.w > v2 ? v1.w : v2;
+    return r;
+}
+
+/*
+ * FMIN
+ */
+
+extern float __attribute__((overloadable)) fmin(float v1, float v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    r.w = v1.w < v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * MAX
+ */
+
+extern char __attribute__((overloadable)) max(char v1, char v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) max(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) max(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) max(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern short __attribute__((overloadable)) max(short v1, short v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) max(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) max(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) max(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int __attribute__((overloadable)) max(int v1, int v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) max(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) max(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) max(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) max(int64_t v1, int64_t v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) max(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) max(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) max(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) max(uchar v1, uchar v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) max(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) max(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) max(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) max(ushort v1, ushort v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) max(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) max(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) max(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) max(uint v1, uint v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) max(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) max(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) max(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) max(ulong v1, ulong v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) max(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) max(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) max(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) max(float v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float2 v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float3 v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float4 v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+
+/*
+ * MIN
+ */
+
+extern int8_t __attribute__((overloadable)) min(int8_t v1, int8_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) min(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) min(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) min(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int16_t __attribute__((overloadable)) min(int16_t v1, int16_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) min(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) min(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) min(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int32_t __attribute__((overloadable)) min(int32_t v1, int32_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) min(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) min(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) min(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) min(int64_t v1, int64_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) min(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) min(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) min(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) min(uchar v1, uchar v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) min(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) min(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) min(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) min(ushort v1, ushort v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) min(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) min(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) min(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) min(uint v1, uint v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) min(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) min(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) min(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) min(ulong v1, ulong v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) min(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) min(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) min(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) min(float v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float2 v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float3 v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float4 v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+
+/*
+ * YUV
+ */
+
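+// Integer BT.601 video-range YUV -> RGBA: the coefficients below are the
+// standard conversion weights scaled by 256, and the +128 term rounds the
+// result before the >> 8.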
+extern uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v) {
+    short Y = ((short)y) - 16;
+    short U = ((short)u) - 128;
+    short V = ((short)v) - 128;
+
+    short4 p;
+    p.r = (Y * 298 + V * 409 + 128) >> 8;
+    p.g = (Y * 298 - U * 100 - V * 208 + 128) >> 8;
+    p.b = (Y * 298 + U * 516 + 128) >> 8;
+    p.a = 255;
+    p.r = rsClamp(p.r, (short)0, (short)255);
+    p.g = rsClamp(p.g, (short)0, (short)255);
+    p.b = rsClamp(p.b, (short)0, (short)255);
+
+    return convert_uchar4(p);
+}
+
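+// 0.003921569f == 1/255; these vectors hold the per-channel U and V weights
+// for the normalized floating-point conversion below.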
+static float4 yuv_U_values = {0.f, -0.392f * 0.003921569f, 2.02f * 0.003921569f, 0.f};
+static float4 yuv_V_values = {1.603f * 0.003921569f, -0.815f * 0.003921569f, 0.f, 0.f};
+
+extern float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v) {
+    float4 color = (float)y * 0.003921569f;
+    float4 fU = ((float)u) - 128.f;
+    float4 fV = ((float)v) - 128.f;
+
+    color += fU * yuv_U_values;
+    color += fV * yuv_V_values;
+    color = clamp(color, 0.f, 1.f);
+    return color;
+}
+
+
+/*
+ * half_RECIP
+ */
+
+extern float __attribute__((overloadable)) half_recip(float v) {
+    // FIXME: replace with a real approximate-reciprocal algorithm for the generic path.
+    return 1.f / v;
+}
+
+extern float2 __attribute__((overloadable)) half_recip(float2 v) {
+    float2 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_recip(float3 v) {
+    float3 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_recip(float4 v) {
+    float4 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    r.w = half_recip(v.w);
+    return r;
+}
+
+
+/*
+ * half_SQRT
+ */
+
+extern float __attribute__((overloadable)) half_sqrt(float v) {
+    return sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_sqrt(float2 v) {
+    float2 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_sqrt(float3 v) {
+    float3 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_sqrt(float4 v) {
+    float4 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    r.w = half_sqrt(v.w);
+    return r;
+}
+
+
+/*
+ * half_RSQRT
+ */
+
+extern float __attribute__((overloadable)) half_rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_rsqrt(float2 v) {
+    float2 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_rsqrt(float3 v) {
+    float3 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_rsqrt(float4 v) {
+    float4 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    r.w = half_rsqrt(v.w);
+    return r;
+}
+
diff --git a/driver/runtime/arch/x86_math.ll b/driver/runtime/arch/x86_math.ll
new file mode 100755
index 0000000..60add80
--- /dev/null
+++ b/driver/runtime/arch/x86_math.ll
@@ -0,0 +1,40 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
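+; sqrt is routed through the llvm.sqrt.* intrinsics so the x86 backend can
+; select native SQRTSS/SQRTPS instructions.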
+declare float @llvm.sqrt.f32(float) nounwind readnone
+declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) nounwind readnone
+declare <3 x float> @llvm.sqrt.v3f32(<3 x float>) nounwind readnone
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readnone
+declare float @llvm.exp.f32(float) nounwind readonly
+declare float @llvm.pow.f32(float, float) nounwind readonly
+
+define float @_Z4sqrtf(float %in) nounwind readnone alwaysinline {
+  %1 = tail call float @llvm.sqrt.f32(float %in) nounwind readnone
+  ret float %1
+}
+
+define <2 x float> @_Z4sqrtDv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = tail call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z4sqrtDv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = tail call <3 x float> @llvm.sqrt.v3f32(<3 x float> %in) nounwind readnone
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z4sqrtDv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %in) nounwind readnone
+  ret <4 x float> %1
+}
+
+define float @_Z3expf(float %in) nounwind readnone {
+  %1 = tail call float @llvm.exp.f32(float %in) nounwind readnone
+  ret float %1
+}
+
+define float @_Z3powff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @llvm.pow.f32(float %v1, float %v2) nounwind readnone
+  ret float %1
+}
+
diff --git a/driver/runtime/build_bc_lib.mk b/driver/runtime/build_bc_lib.mk
new file mode 100644
index 0000000..542cd78
--- /dev/null
+++ b/driver/runtime/build_bc_lib.mk
@@ -0,0 +1,75 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+BCC_STRIP_ATTR := $(HOST_OUT_EXECUTABLES)/bcc_strip_attr$(HOST_EXECUTABLE_SUFFIX)
+
+# Pass the +long64 feature flag to the underlying Clang invocation, since we
+# are generating a library for use with RenderScript (where long is 64-bit,
+# not 32-bit).
+bc_clang_cc1_cflags := -target-feature +long64
+bc_translated_clang_cc1_cflags := $(addprefix -Xclang , $(bc_clang_cc1_cflags))
+
+bc_cflags := -MD \
+             -DRS_VERSION=$(RS_VERSION) \
+             -std=c99 \
+             -c \
+             -O3 \
+             -fno-builtin \
+             -emit-llvm \
+             -target armv7-none-linux-gnueabi \
+             -fsigned-char \
+             $(LOCAL_CFLAGS) \
+             $(bc_translated_clang_cc1_cflags)
+
+ifeq ($(rs_debug_runtime),1)
+bc_cflags += -DRS_DEBUG_RUNTIME
+endif
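+# Clear the flag so later modules that include this makefile do not inherit it.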
+rs_debug_runtime:=
+
+c_sources := $(filter %.c,$(LOCAL_SRC_FILES))
+ll_sources := $(filter %.ll,$(LOCAL_SRC_FILES))
+
+c_bc_files := $(patsubst %.c,%.bc, \
+    $(addprefix $(intermediates)/, $(c_sources)))
+
+ll_bc_files := $(patsubst %.ll,%.bc, \
+    $(addprefix $(intermediates)/, $(ll_sources)))
+
+$(c_bc_files): PRIVATE_INCLUDES := \
+    frameworks/rs/scriptc \
+    external/clang/lib/Headers
+$(c_bc_files): PRIVATE_CFLAGS := $(bc_cflags)
+
+$(c_bc_files): $(intermediates)/%.bc: $(LOCAL_PATH)/%.c  $(CLANG)
+	@mkdir -p $(dir $@)
+	$(hide) $(CLANG) $(addprefix -I, $(PRIVATE_INCLUDES)) $(PRIVATE_CFLAGS) $< -o $@
+
+$(ll_bc_files): $(intermediates)/%.bc: $(LOCAL_PATH)/%.ll $(LLVM_AS)
+	@mkdir -p $(dir $@)
+	$(hide) $(LLVM_AS) $< -o $@
+
+-include $(c_bc_files:%.bc=%.d)
+-include $(ll_bc_files:%.bc=%.d)
+
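+# Link the per-source .bc files into one module, then run bcc_strip_attr over
+# the result to strip function attributes from the final library.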
+$(LOCAL_BUILT_MODULE): PRIVATE_BC_FILES := $(c_bc_files) $(ll_bc_files)
+$(LOCAL_BUILT_MODULE): $(c_bc_files) $(ll_bc_files)
+$(LOCAL_BUILT_MODULE): $(LLVM_LINK) $(clcore_LLVM_LD)
+$(LOCAL_BUILT_MODULE): $(LLVM_AS) $(BCC_STRIP_ATTR)
+	@mkdir -p $(dir $@)
+	$(hide) $(LLVM_LINK) $(PRIVATE_BC_FILES) -o $@.unstripped
+	$(hide) $(BCC_STRIP_ATTR) -o $@ $@.unstripped
diff --git a/driver/runtime/convert.ll b/driver/runtime/convert.ll
new file mode 100644
index 0000000..f45850d
--- /dev/null
+++ b/driver/runtime/convert.ll
@@ -0,0 +1,731 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
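+; Symbol names are Itanium-mangled RenderScript overloads, e.g.
+; _Z14convert_float4Dv4_h is convert_float4(uchar4). Type codes: c = char,
+; h = uchar, s = short, t = ushort, i = int, j = uint, f = float.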
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FLOAT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <2 x float> @_Z14convert_float2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i8> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i8> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i8> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i8> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i8> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i8> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i16> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i16> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i16> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i16> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i16> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i16> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i32> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i32> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i32> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i32> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i32> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i32> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  ret <2 x float> %in
+}
+
+define <3 x float> @_Z14convert_float3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  ret <3 x float> %in
+}
+
+define <4 x float> @_Z14convert_float4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  ret <4 x float> %in
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  CHAR                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+define <4 x i8> @_Z13convert_char4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  UCHAR                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  SHORT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i16> @_Z14convert_short4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                 USHORT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i16> @_Z15convert_ushort4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                   INT                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i32> @_Z12convert_int4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  UINT                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i32> @_Z13convert_uint4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
diff --git a/driver/runtime/math.ll b/driver/runtime/math.ll
new file mode 100644
index 0000000..f026d15
--- /dev/null
+++ b/driver/runtime/math.ll
@@ -0,0 +1,19 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
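+; Generic (ARM) wrappers mapping RenderScript math symbols onto LLVM
+; intrinsics; the backend lowers these to native instructions where possible.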
+declare float @llvm.sqrt.f32(float)
+declare float @llvm.pow.f32(float, float)
+declare float @llvm.fabs.f32(float)
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
+declare <3 x float> @llvm.fabs.v3f32(<3 x float>)
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
+
+define float @_Z4sqrtf(float %v) nounwind readnone alwaysinline {
+  %1 = tail call float @llvm.sqrt.f32(float %v)
+  ret float %1
+}
+
+define float @_Z3powf(float %v1, float %v2) nounwind readnone alwaysinline {
+  %1 = tail call float @llvm.pow.f32(float  %v1, float %v2)
+  ret float %1
+}
diff --git a/driver/runtime/matrix.ll b/driver/runtime/matrix.ll
new file mode 100644
index 0000000..c56405d
--- /dev/null
+++ b/driver/runtime/matrix.ll
@@ -0,0 +1,176 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
+%struct.rs_matrix4x4 = type { [16 x float] }
+%struct.rs_matrix3x3 = type { [9 x float] }
+%struct.rs_matrix2x2 = type { [4 x float] }
+
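+; smear_f broadcasts a scalar into all four lanes of a <4 x float> so each
+; matrix column can be scaled with a single vector fmul.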
+define internal <4 x float> @smear_f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 6
+  %pz2 = bitcast float* %pz to <3 x float>*
+  %zm2 = load <3 x float>* %pz2, align 4
+  %zm = shufflevector <3 x float> %zm2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a4, %a3
+  %a6 = shufflevector <4 x float> %a5, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a6
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyP12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %r = tail call <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind
+  ret <3 x float> %r
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = shufflevector <4 x float> %a3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a4
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyP12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %r = tail call <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind
+  ret <3 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %x0 = extractelement <4 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <4 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <4 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+  %w0 = extractelement <4 x float> %in, i32 3
+  %w = tail call <4 x float> @smear_f(float %w0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a3, %a4
+  %a6 = fmul <4 x float> %w, %wm
+  %a7 = fadd <4 x float> %a5, %a6
+  ret <4 x float> %a7
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
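+; A float3 input is treated as (x, y, z, 1): the translation column %wm is
+; added unconditionally.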
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  %a5 = fmul <4 x float> %z, %zm
+  %a6 = fadd <4 x float> %a4, %a5
+  ret <4 x float> %a6
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  ret <4 x float> %a4
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
diff --git a/driver/runtime/rsClamp.ll b/driver/runtime/rsClamp.ll
new file mode 100644
index 0000000..eba678a
--- /dev/null
+++ b/driver/runtime/rsClamp.ll
@@ -0,0 +1,60 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
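+; Each _Z7rsClamp<sig> symbol is a mangled rsClamp(value, low, high) overload,
+; implemented branch-free as min(value, high) followed by max(..., low) using
+; compare + select.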
+define float @_Z7rsClampfff(float %value, float %low, float %high) nounwind readonly {
+  %1 = fcmp olt float %value, %high
+  %2 = select i1 %1, float %value, float %high
+  %3 = fcmp ogt float %2, %low
+  %4 = select i1 %3, float %2, float %low
+  ret float %4
+}
+
+define signext i8 @_Z7rsClampccc(i8 signext %value, i8 signext %low, i8 signext %high) nounwind readonly {
+  %1 = icmp slt i8 %value, %high
+  %2 = select i1 %1, i8 %value, i8 %high
+  %3 = icmp sgt i8 %2, %low
+  %4 = select i1 %3, i8 %2, i8 %low
+  ret i8 %4
+}
+
+define zeroext i8 @_Z7rsClamphhh(i8 zeroext %value, i8 zeroext %low, i8 zeroext %high) nounwind readonly {
+  %1 = icmp ult i8 %value, %high
+  %2 = select i1 %1, i8 %value, i8 %high
+  %3 = icmp ugt i8 %2, %low
+  %4 = select i1 %3, i8 %2, i8 %low
+  ret i8 %4
+}
+
+define signext i16 @_Z7rsClampsss(i16 signext %value, i16 signext %low, i16 signext %high) nounwind readonly {
+  %1 = icmp slt i16 %value, %high
+  %2 = select i1 %1, i16 %value, i16 %high
+  %3 = icmp sgt i16 %2, %low
+  %4 = select i1 %3, i16 %2, i16 %low
+  ret i16 %4
+}
+
+define zeroext i16 @_Z7rsClampttt(i16 zeroext %value, i16 zeroext %low, i16 zeroext %high) nounwind readonly {
+  %1 = icmp ult i16 %value, %high
+  %2 = select i1 %1, i16 %value, i16 %high
+  %3 = icmp ugt i16 %2, %low
+  %4 = select i1 %3, i16 %2, i16 %low
+  ret i16 %4
+}
+
+define i32 @_Z7rsClampiii(i32 %value, i32 %low, i32 %high) nounwind readonly {
+  %1 = icmp slt i32 %value, %high
+  %2 = select i1 %1, i32 %value, i32 %high
+  %3 = icmp sgt i32 %2, %low
+  %4 = select i1 %3, i32 %2, i32 %low
+  ret i32 %4
+}
+
+define i32 @_Z7rsClampjjj(i32 %value, i32 %low, i32 %high) nounwind readonly {
+  %1 = icmp ult i32 %value, %high
+  %2 = select i1 %1, i32 %value, i32 %high
+  %3 = icmp ugt i32 %2, %low
+  %4 = select i1 %3, i32 %2, i32 %low
+  ret i32 %4
+}
+
diff --git a/driver/runtime/rs_allocation.c b/driver/runtime/rs_allocation.c
new file mode 100644
index 0000000..1d0f5b6
--- /dev/null
+++ b/driver/runtime/rs_allocation.c
@@ -0,0 +1,310 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+// Opaque Allocation type operations
+extern uint32_t __attribute__((overloadable))
+    rsAllocationGetDimX(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.drvState.lod[0].dimX;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimY(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.drvState.lod[0].dimY;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimZ(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.drvState.lod[0].dimZ;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimLOD(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.hasMipmaps;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimFaces(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.hasFaces;
+}
+
+
+extern rs_element __attribute__((overloadable))
+        rsAllocationGetElement(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    if (alloc == NULL) {
+        rs_element nullElem = {0};
+        return nullElem;
+    }
+    Type_t *type = (Type_t *)alloc->mHal.state.type;
+    rs_element returnElem = {type->mHal.state.element};
+    return returnElem;
+}
+
+// TODO: this needs to be optimized, obviously
+static void memcpy(void* dst, void* src, size_t size) {
+    char* dst_c = (char*) dst, *src_c = (char*) src;
+    for (; size > 0; size--) {
+        *dst_c++ = *src_c++;
+    }
+}
+
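+// ELEMENT_AT(T) expands to the rsSetElementAt_T / rsGetElementAt_T overloads.
+// Debug builds forward to out-of-line helpers; release builds compute the
+// element address inline from the LOD 0 base pointer, the row stride, and
+// (for 3D accesses) the Y dimension.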
+#ifdef RS_DEBUG_RUNTIME
+#define ELEMENT_AT(T)                                                   \
+    extern void __attribute__((overloadable))                           \
+        rsSetElementAt_##T(rs_allocation a, const T *val, uint32_t x);  \
+    extern void __attribute__((overloadable))                           \
+        rsSetElementAt_##T(rs_allocation a, const T *val, uint32_t x, uint32_t y); \
+    extern void __attribute__((overloadable))                           \
+        rsSetElementAt_##T(rs_allocation a, const T *val, uint32_t x, uint32_t y, uint32_t z); \
+    extern void __attribute__((overloadable))                           \
+        rsGetElementAt_##T(rs_allocation a, T *val, uint32_t x);  \
+    extern void __attribute__((overloadable))                           \
+        rsGetElementAt_##T(rs_allocation a, T *val, uint32_t x, uint32_t y); \
+    extern void __attribute__((overloadable))                           \
+        rsGetElementAt_##T(rs_allocation a, T *val, uint32_t x, uint32_t y, uint32_t z); \
+                                                                        \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x) {            \
+        rsSetElementAt_##T(a, &val, x);                                 \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y) { \
+        rsSetElementAt_##T(a, &val, x, y);                              \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y, uint32_t z) { \
+        rsSetElementAt_##T(a, &val, x, y, z);                           \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x) {                   \
+        T tmp;                                                          \
+        rsGetElementAt_##T(a, &tmp, x);                                 \
+        return tmp;                                                     \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y) {       \
+        T tmp;                                                          \
+        rsGetElementAt_##T(a, &tmp, x, y);                              \
+        return tmp;                                                     \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) { \
+        T tmp;                                                          \
+        rsGetElementAt_##T(a, &tmp, x, y, z);                           \
+        return tmp;                                                     \
+    }
+
+#else
+#define ELEMENT_AT(T)                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x) {            \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        uint8_t *p = (uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t eSize = sizeof(T);                               \
+        *((T*)&p[(eSize * x)]) = val;                                   \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y) { \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        uint8_t *p = (uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t eSize = sizeof(T);                               \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        *((T*)&p[(eSize * x) + (y * stride)]) = val;                    \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y, uint32_t z) { \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        uint8_t *p = (uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;         \
+        uint8_t *dp = &p[(sizeof(T) * x) + (y * stride) + (z * stride * dimY)]; \
+        ((T*)dp)[0] = val;                                        \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x) {                   \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        return *((T*)&p[(sizeof(T) * x)]);                              \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y) {       \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        return *((T*)&p[(sizeof(T) * x) + (y * stride)]);               \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) { \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;         \
+        const uint8_t *dp = &p[(sizeof(T) * x) + (y * stride) + (z * stride * dimY)]; \
+        return ((const T*)dp)[0];                                       \
+    }
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    return &p[eSize * x];
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    return &p[(eSize * x) + (y * stride)];
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;
+    return &p[(eSize * x) + (y * stride) + (z * stride * dimY)];
+}
+extern void __attribute__((overloadable))
+        rsSetElementAt(rs_allocation a, void* ptr, uint32_t x) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    memcpy((void*)&p[eSize * x], ptr, eSize);
+}
+
+extern void __attribute__((overloadable))
+        rsSetElementAt(rs_allocation a, void* ptr, uint32_t x, uint32_t y) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    memcpy((void*)&p[(eSize * x) + (y * stride)], ptr, eSize);
+}
+
+extern void __attribute__((overloadable))
+        rsSetElementAt(rs_allocation a, void* ptr, uint32_t x, uint32_t y, uint32_t z) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;
+    memcpy((void*)&p[(eSize * x) + (y * stride) + (z * stride * dimY)], ptr, eSize);
+}
+#endif
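+
+// The addressing above mirrors the allocation layout: cell (x, y, z)
+// lives at byte offset (eSize * x) + (y * stride) + (z * stride * dimY),
+// where stride is the padded row pitch of LOD 0. For example, a float4
+// allocation (eSize = 16) with stride = 1024 and dimY = 32 places cell
+// (2, 3, 1) at 16*2 + 3*1024 + 1*1024*32 = 35872 bytes from the base.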
+
+ELEMENT_AT(char)
+ELEMENT_AT(char2)
+ELEMENT_AT(char3)
+ELEMENT_AT(char4)
+ELEMENT_AT(uchar)
+ELEMENT_AT(uchar2)
+ELEMENT_AT(uchar3)
+ELEMENT_AT(uchar4)
+ELEMENT_AT(short)
+ELEMENT_AT(short2)
+ELEMENT_AT(short3)
+ELEMENT_AT(short4)
+ELEMENT_AT(ushort)
+ELEMENT_AT(ushort2)
+ELEMENT_AT(ushort3)
+ELEMENT_AT(ushort4)
+ELEMENT_AT(int)
+ELEMENT_AT(int2)
+ELEMENT_AT(int3)
+ELEMENT_AT(int4)
+ELEMENT_AT(uint)
+ELEMENT_AT(uint2)
+ELEMENT_AT(uint3)
+ELEMENT_AT(uint4)
+ELEMENT_AT(long)
+ELEMENT_AT(long2)
+ELEMENT_AT(long3)
+ELEMENT_AT(long4)
+ELEMENT_AT(ulong)
+ELEMENT_AT(ulong2)
+ELEMENT_AT(ulong3)
+ELEMENT_AT(ulong4)
+ELEMENT_AT(float)
+ELEMENT_AT(float2)
+ELEMENT_AT(float3)
+ELEMENT_AT(float4)
+ELEMENT_AT(double)
+ELEMENT_AT(double2)
+ELEMENT_AT(double3)
+ELEMENT_AT(double4)
+
+#undef ELEMENT_AT
+
+
+extern const uchar __attribute__((overloadable))
+        rsGetElementAtYuv_uchar_Y(rs_allocation a, uint32_t x, uint32_t y) {
+    return rsGetElementAt_uchar(a, x, y);
+}
+
+extern const uchar __attribute__((overloadable))
+        rsGetElementAtYuv_uchar_U(rs_allocation a, uint32_t x, uint32_t y) {
+
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint32_t yuvID = alloc->mHal.state.yuv;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[1].mallocPtr;
+    const uint32_t stride = alloc->mHal.drvState.lod[1].stride;
+
+    switch(yuvID) {
+    case 0x32315659: //HAL_PIXEL_FORMAT_YV12:
+        x >>= 1;
+        y >>= 1;
+        return p[x + (y * stride)];
+    case 11: //HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
+        x >>= 1;
+        y >>= 1;
+        return p[(x<<1) + (y * stride)];
+    default:
+        break;
+    }
+
+    return 0;
+}
+
+extern const uchar __attribute__((overloadable))
+        rsGetElementAtYuv_uchar_V(rs_allocation a, uint32_t x, uint32_t y) {
+
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint32_t yuvID = alloc->mHal.state.yuv;
+
+    switch(yuvID) {
+    case 0x32315659: //HAL_PIXEL_FORMAT_YV12:
+        {
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[2].mallocPtr;
+        const uint32_t stride = alloc->mHal.drvState.lod[2].stride;
+        x >>= 1;
+        y >>= 1;
+        return p[x + (y * stride)];
+        }
+    case 11: //HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
+        {
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[1].mallocPtr;
+        const uint32_t stride = alloc->mHal.drvState.lod[1].stride;
+        x >>= 1;
+        y >>= 1;
+        return p[(x<<1) + (y * stride) + 1];
+        }
+    default:
+            break;
+    }
+
+    return 0;
+}
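+
+// Note on the chroma getters: with 4:2:0 subsampling one U and one V
+// sample cover a 2x2 block of luma pixels, hence x >>= 1 and y >>= 1.
+// YV12 keeps the chroma planes separate (LODs 1 and 2 here), while
+// HAL_PIXEL_FORMAT_YCrCb_420_SP (NV21) interleaves both chroma samples
+// in one plane, so the byte index is doubled via (x << 1).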
+
diff --git a/driver/runtime/rs_cl.c b/driver/runtime/rs_cl.c
new file mode 100755
index 0000000..c0543de
--- /dev/null
+++ b/driver/runtime/rs_cl.c
@@ -0,0 +1,1194 @@
+#include "rs_types.rsh"
+
+extern float2 __attribute__((overloadable)) convert_float2(int2 c);
+extern float3 __attribute__((overloadable)) convert_float3(int3 c);
+extern float4 __attribute__((overloadable)) convert_float4(int4 c);
+
+extern int2 __attribute__((overloadable)) convert_int2(float2 c);
+extern int3 __attribute__((overloadable)) convert_int3(float3 c);
+extern int4 __attribute__((overloadable)) convert_int4(float4 c);
+
+
+extern float __attribute__((overloadable)) fmin(float v, float v2);
+extern float2 __attribute__((overloadable)) fmin(float2 v, float v2);
+extern float3 __attribute__((overloadable)) fmin(float3 v, float v2);
+extern float4 __attribute__((overloadable)) fmin(float4 v, float v2);
+
+extern float __attribute__((overloadable)) fmax(float v, float v2);
+extern float2 __attribute__((overloadable)) fmax(float2 v, float v2);
+extern float3 __attribute__((overloadable)) fmax(float3 v, float v2);
+extern float4 __attribute__((overloadable)) fmax(float4 v, float v2);
+
+// Float ops, 6.11.2
+
+#define FN_FUNC_FN(fnc)                                         \
+extern float2 __attribute__((overloadable)) fnc(float2 v) { \
+    float2 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern float3 __attribute__((overloadable)) fnc(float3 v) { \
+    float3 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern float4 __attribute__((overloadable)) fnc(float4 v) { \
+    float4 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+#define IN_FUNC_FN(fnc)                                         \
+extern int2 __attribute__((overloadable)) fnc(float2 v) {   \
+    int2 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern int3 __attribute__((overloadable)) fnc(float3 v) {   \
+    int3 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern int4 __attribute__((overloadable)) fnc(float4 v) {   \
+    int4 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+#define FN_FUNC_FN_FN(fnc)                                                  \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, float2 v2) { \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, float3 v2) { \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, float4 v2) { \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    r.w = fnc(v1.w, v2.w);                                                  \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_F(fnc)                                                   \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, float v2) {  \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, float v2) {  \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, float v2) {  \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    r.w = fnc(v1.w, v2);                                                    \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_IN(fnc)                                                  \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int2 v2) {   \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int3 v2) {   \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int4 v2) {   \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    r.w = fnc(v1.w, v2.w);                                                  \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_I(fnc)                                                   \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int v2) {    \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int v2) {    \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int v2) {    \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    r.w = fnc(v1.w, v2);                                                    \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_PFN(fnc)                     \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 *v2) {            \
+    float2 r;                                   \
+    float t[2];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 *v2) {            \
+    float3 r;                                   \
+    float t[3];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    r.z = fnc(v1.z, &t[2]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    v2->z = t[2];                               \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 *v2) {            \
+    float4 r;                                   \
+    float t[4];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    r.z = fnc(v1.z, &t[2]);                     \
+    r.w = fnc(v1.w, &t[3]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    v2->z = t[2];                               \
+    v2->w = t[3];                               \
+    return r;                                   \
+}
+
+#define FN_FUNC_FN_PIN(fnc)                                                 \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int2 *v2) {  \
+    float2 r;                                                               \
+    int t[2];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int3 *v2) {  \
+    float3 r;                                                               \
+    int t[3];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    r.z = fnc(v1.z, &t[2]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    v2->z = t[2];                                                           \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int4 *v2) {  \
+    float4 r;                                                               \
+    int t[4];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    r.z = fnc(v1.z, &t[2]);                                                 \
+    r.w = fnc(v1.w, &t[3]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    v2->z = t[2];                                                           \
+    v2->w = t[3];                                                           \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_FN_FN(fnc)                   \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 v2, float2 v3) {  \
+    float2 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 v2, float3 v3) {  \
+    float3 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    r.z = fnc(v1.z, v2.z, v3.z);                \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 v2, float4 v3) {  \
+    float4 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    r.z = fnc(v1.z, v2.z, v3.z);                \
+    r.w = fnc(v1.w, v2.w, v3.w);                \
+    return r;                                   \
+}
+
+#define FN_FUNC_FN_FN_PIN(fnc)                  \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 v2, int2 *v3) {   \
+    float2 r;                                   \
+    int t[2];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 v2, int3 *v3) {   \
+    float3 r;                                   \
+    int t[3];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    r.z = fnc(v1.z, v2.z, &t[2]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    v3->z = t[2];                               \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 v2, int4 *v3) {   \
+    float4 r;                                   \
+    int t[4];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    r.z = fnc(v1.z, v2.z, &t[2]);               \
+    r.w = fnc(v1.w, v2.w, &t[3]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    v3->z = t[2];                               \
+    v3->w = t[3];                               \
+    return r;                                   \
+}
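+
+// Naming scheme for the wrapper macros above: FN is a float vector, IN an
+// int vector, F/I a scalar float/int, and a leading P marks a pointer
+// (out-parameter) argument. FN_FUNC_FN_PIN(frexp), for example, expands
+// float-vector overloads of frexp taking a float vector and a pointer to
+// an int vector.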
+
+static const int iposinf = 0x7f800000;
+static const int ineginf = 0xff800000;
+
+static float posinf() {
+    float f = *((float*)&iposinf);
+    return f;
+}
+
+static float neginf() {
+    float f = *((float*)&ineginf);
+    return f;
+}
+
+static bool isinf(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == iposinf) || (i == ineginf);
+}
+
+static bool isnan(float f) {
+    int i = *((int*)(void*)&f);
+    return (((i & 0x7f800000) == 0x7f800000) && (i & 0x007fffff));
+}
+
+static bool isposzero(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == 0x00000000);
+}
+
+static bool isnegzero(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == 0x80000000);
+}
+
+static bool iszero(float f) {
+    return isposzero(f) || isnegzero(f);
+}
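+
+// These helpers classify floats by their raw IEEE-754 binary32 bit
+// pattern (sign bit 31, exponent bits 30-23, mantissa bits 22-0), which
+// keeps the runtime from depending on libm's isinf/isnan.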
+
+
+extern float __attribute__((overloadable)) acos(float);
+FN_FUNC_FN(acos)
+
+extern float __attribute__((overloadable)) acosh(float);
+FN_FUNC_FN(acosh)
+
+
+extern float __attribute__((overloadable)) acospi(float v) {
+    return acos(v) / M_PI;
+}
+FN_FUNC_FN(acospi)
+
+extern float __attribute__((overloadable)) asin(float);
+FN_FUNC_FN(asin)
+
+extern float __attribute__((overloadable)) asinh(float);
+FN_FUNC_FN(asinh)
+
+extern float __attribute__((overloadable)) asinpi(float v) {
+    return asin(v) / M_PI;
+}
+FN_FUNC_FN(asinpi)
+
+extern float __attribute__((overloadable)) atan(float);
+FN_FUNC_FN(atan)
+
+extern float __attribute__((overloadable)) atan2(float, float);
+FN_FUNC_FN_FN(atan2)
+
+extern float __attribute__((overloadable)) atanh(float);
+FN_FUNC_FN(atanh)
+
+extern float __attribute__((overloadable)) atanpi(float v) {
+    return atan(v) / M_PI;
+}
+FN_FUNC_FN(atanpi)
+
+
+extern float __attribute__((overloadable)) atan2pi(float y, float x) {
+    return atan2(y, x) / M_PI;
+}
+FN_FUNC_FN_FN(atan2pi)
+
+extern float __attribute__((overloadable)) cbrt(float);
+FN_FUNC_FN(cbrt)
+
+extern float __attribute__((overloadable)) ceil(float);
+FN_FUNC_FN(ceil)
+
+extern float __attribute__((overloadable)) copysign(float, float);
+FN_FUNC_FN_FN(copysign)
+
+extern float __attribute__((overloadable)) cos(float);
+FN_FUNC_FN(cos)
+
+extern float __attribute__((overloadable)) cosh(float);
+FN_FUNC_FN(cosh)
+
+extern float __attribute__((overloadable)) cospi(float v) {
+    return cos(v * M_PI);
+}
+FN_FUNC_FN(cospi)
+
+extern float __attribute__((overloadable)) erfc(float);
+FN_FUNC_FN(erfc)
+
+extern float __attribute__((overloadable)) erf(float);
+FN_FUNC_FN(erf)
+
+extern float __attribute__((overloadable)) exp(float);
+FN_FUNC_FN(exp)
+
+extern float __attribute__((overloadable)) exp2(float);
+FN_FUNC_FN(exp2)
+
+extern float __attribute__((overloadable)) pow(float, float);
+
+extern float __attribute__((overloadable)) exp10(float v) {
+    return exp2(v * 3.321928095f);
+}
+FN_FUNC_FN(exp10)
+
+extern float __attribute__((overloadable)) expm1(float);
+FN_FUNC_FN(expm1)
+
+extern float __attribute__((overloadable)) fabs(float v) {
+    int i = *((int*)(void*)&v) & 0x7fffffff;
+    return  *((float*)(void*)&i);
+}
+FN_FUNC_FN(fabs)
+
+extern float __attribute__((overloadable)) fdim(float, float);
+FN_FUNC_FN_FN(fdim)
+
+extern float __attribute__((overloadable)) floor(float);
+FN_FUNC_FN(floor)
+
+extern float __attribute__((overloadable)) fma(float, float, float);
+FN_FUNC_FN_FN_FN(fma)
+
+extern float __attribute__((overloadable)) fmin(float, float);
+
+extern float __attribute__((overloadable)) fmod(float, float);
+FN_FUNC_FN_FN(fmod)
+
+extern float __attribute__((overloadable)) fract(float v, float *iptr) {
+    // Keep the integral part as a float so large inputs do not overflow
+    // an int.
+    float fl = floor(v);
+    if (iptr) {
+        iptr[0] = fl;
+    }
+    return fmin(v - fl, 0x1.fffffep-1f);
+}
+FN_FUNC_FN_PFN(fract)
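+
+// Example: fract(2.75f, &i) returns 0.75f and stores 2.0f through i;
+// fract(-0.25f, &i) returns 0.75f and stores -1.0f, because the integral
+// part comes from floor() rather than truncation.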
+
+extern float __attribute__((overloadable)) frexp(float, int *);
+FN_FUNC_FN_PIN(frexp)
+
+extern float __attribute__((overloadable)) hypot(float, float);
+FN_FUNC_FN_FN(hypot)
+
+extern int __attribute__((overloadable)) ilogb(float);
+IN_FUNC_FN(ilogb)
+
+extern float __attribute__((overloadable)) ldexp(float, int);
+FN_FUNC_FN_IN(ldexp)
+FN_FUNC_FN_I(ldexp)
+
+extern float __attribute__((overloadable)) lgamma(float);
+FN_FUNC_FN(lgamma)
+extern float __attribute__((overloadable)) lgamma(float, int*);
+FN_FUNC_FN_PIN(lgamma)
+
+extern float __attribute__((overloadable)) log(float);
+FN_FUNC_FN(log)
+
+extern float __attribute__((overloadable)) log10(float);
+FN_FUNC_FN(log10)
+
+
+extern float __attribute__((overloadable)) log2(float v) {
+    return log10(v) * 3.321928095f;
+}
+FN_FUNC_FN(log2)
+
+extern float __attribute__((overloadable)) log1p(float);
+FN_FUNC_FN(log1p)
+
+extern float __attribute__((overloadable)) logb(float);
+FN_FUNC_FN(logb)
+
+extern float __attribute__((overloadable)) mad(float a, float b, float c) {
+    return a * b + c;
+}
+extern float2 __attribute__((overloadable)) mad(float2 a, float2 b, float2 c) {
+    return a * b + c;
+}
+extern float3 __attribute__((overloadable)) mad(float3 a, float3 b, float3 c) {
+    return a * b + c;
+}
+extern float4 __attribute__((overloadable)) mad(float4 a, float4 b, float4 c) {
+    return a * b + c;
+}
+
+extern float __attribute__((overloadable)) modf(float, float *);
+FN_FUNC_FN_PFN(modf)
+
+extern float __attribute__((overloadable)) nan(uint v) {
+    float f[1];
+    uint32_t *ip = (uint32_t *)f;
+    *ip = v | 0x7fc00000;
+    return f[0];
+}
+
+extern float __attribute__((overloadable)) nextafter(float, float);
+FN_FUNC_FN_FN(nextafter)
+
+FN_FUNC_FN_FN(pow)
+
+extern float __attribute__((overloadable)) pown(float v, int p) {
+    return pow(v, (float)p);
+}
+extern float2 __attribute__((overloadable)) pown(float2 v, int2 p) {
+    float2 f2 = convert_float2(p);
+    return pow(v, f2);
+}
+extern float3 __attribute__((overloadable)) pown(float3 v, int3 p) {
+    float3 f3 = convert_float3(p);
+    return pow(v, f3);
+}
+extern float4 __attribute__((overloadable)) pown(float4 v, int4 p) {
+    float4 f4 = convert_float4(p);
+    return pow(v, f4);
+}
+
+extern float __attribute__((overloadable)) powr(float v, float p) {
+    return pow(v, p);
+}
+extern float2 __attribute__((overloadable)) powr(float2 v, float2 p) {
+    return pow(v, p);
+}
+extern float3 __attribute__((overloadable)) powr(float3 v, float3 p) {
+    return pow(v, p);
+}
+extern float4 __attribute__((overloadable)) powr(float4 v, float4 p) {
+    return pow(v, p);
+}
+
+extern float __attribute__((overloadable)) remainder(float, float);
+FN_FUNC_FN_FN(remainder)
+
+extern float __attribute__((overloadable)) remquo(float, float, int *);
+FN_FUNC_FN_FN_PIN(remquo)
+
+extern float __attribute__((overloadable)) rint(float);
+FN_FUNC_FN(rint)
+
+extern float __attribute__((overloadable)) rootn(float v, int r) {
+    if (r == 0) {
+        return nan(0);
+    }
+
+    if (iszero(v)) {
+        if (r < 0) {
+            if (r & 1) {
+                return copysign(posinf(), v);
+            } else {
+                return posinf();
+            }
+        } else {
+            if (r & 1) {
+                return copysign(0.f, v);
+            } else {
+                return 0.f;
+            }
+        }
+    }
+
+    if (!isinf(v) && !isnan(v) && (v < 0.f)) {
+        if (r & 1) {
+            return (-1.f * pow(-1.f * v, 1.f / r));
+        } else {
+            return nan(0);
+        }
+    }
+
+    return pow(v, 1.f / r);
+}
+FN_FUNC_FN_IN(rootn)
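+
+// Special cases handled above: rootn(v, 0) is NaN; odd roots of negative
+// values stay real (e.g. rootn(-8.f, 3) == -2.f) while even roots of
+// negative values are NaN; zeros with negative r follow the sign rules
+// spelled out case by case.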
+
+extern float __attribute__((overloadable)) round(float);
+FN_FUNC_FN(round)
+
+
+extern float __attribute__((overloadable)) sqrt(float);
+extern float __attribute__((overloadable)) rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+FN_FUNC_FN(rsqrt)
+
+extern float __attribute__((overloadable)) sin(float);
+FN_FUNC_FN(sin)
+
+extern float __attribute__((overloadable)) sincos(float v, float *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float2 __attribute__((overloadable)) sincos(float2 v, float2 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float3 __attribute__((overloadable)) sincos(float3 v, float3 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float4 __attribute__((overloadable)) sincos(float4 v, float4 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+
+extern float __attribute__((overloadable)) sinh(float);
+FN_FUNC_FN(sinh)
+
+extern float __attribute__((overloadable)) sinpi(float v) {
+    return sin(v * M_PI);
+}
+FN_FUNC_FN(sinpi)
+
+extern float __attribute__((overloadable)) tan(float);
+FN_FUNC_FN(tan)
+
+extern float __attribute__((overloadable)) tanh(float);
+FN_FUNC_FN(tanh)
+
+extern float __attribute__((overloadable)) tanpi(float v) {
+    return tan(v * M_PI);
+}
+FN_FUNC_FN(tanpi)
+
+
+extern float __attribute__((overloadable)) tgamma(float);
+FN_FUNC_FN(tgamma)
+
+extern float __attribute__((overloadable)) trunc(float);
+FN_FUNC_FN(trunc)
+
+// Int ops (partial), 6.11.3
+
+#define XN_FUNC_YN(typeout, fnc, typein)                                \
+extern typeout __attribute__((overloadable)) fnc(typein);               \
+extern typeout##2 __attribute__((overloadable)) fnc(typein##2 v) {  \
+    typeout##2 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    return r;                                                           \
+}                                                                       \
+extern typeout##3 __attribute__((overloadable)) fnc(typein##3 v) {  \
+    typeout##3 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    r.z = fnc(v.z);                                                     \
+    return r;                                                           \
+}                                                                       \
+extern typeout##4 __attribute__((overloadable)) fnc(typein##4 v) {  \
+    typeout##4 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    r.z = fnc(v.z);                                                     \
+    r.w = fnc(v.w);                                                     \
+    return r;                                                           \
+}
+
+
+#define UIN_FUNC_IN(fnc)          \
+XN_FUNC_YN(uchar, fnc, char)      \
+XN_FUNC_YN(ushort, fnc, short)    \
+XN_FUNC_YN(uint, fnc, int)
+
+#define IN_FUNC_IN(fnc)           \
+XN_FUNC_YN(uchar, fnc, uchar)     \
+XN_FUNC_YN(char, fnc, char)       \
+XN_FUNC_YN(ushort, fnc, ushort)   \
+XN_FUNC_YN(short, fnc, short)     \
+XN_FUNC_YN(uint, fnc, uint)       \
+XN_FUNC_YN(int, fnc, int)
+
+
+#define XN_FUNC_XN_XN_BODY(type, fnc, body)         \
+extern type __attribute__((overloadable))       \
+        fnc(type v1, type v2) {                     \
+    return body;                                    \
+}                                                   \
+extern type##2 __attribute__((overloadable))    \
+        fnc(type##2 v1, type##2 v2) {               \
+    type##2 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    return r;                                       \
+}                                                   \
+extern type##3 __attribute__((overloadable))    \
+        fnc(type##3 v1, type##3 v2) {               \
+    type##3 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    r.z = fnc(v1.z, v2.z);                          \
+    return r;                                       \
+}                                                   \
+extern type##4 __attribute__((overloadable))    \
+        fnc(type##4 v1, type##4 v2) {               \
+    type##4 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    r.z = fnc(v1.z, v2.z);                          \
+    r.w = fnc(v1.w, v2.w);                          \
+    return r;                                       \
+}
+
+#define IN_FUNC_IN_IN_BODY(fnc, body) \
+XN_FUNC_XN_XN_BODY(uchar, fnc, body)  \
+XN_FUNC_XN_XN_BODY(char, fnc, body)   \
+XN_FUNC_XN_XN_BODY(ushort, fnc, body) \
+XN_FUNC_XN_XN_BODY(short, fnc, body)  \
+XN_FUNC_XN_XN_BODY(uint, fnc, body)   \
+XN_FUNC_XN_XN_BODY(int, fnc, body)    \
+XN_FUNC_XN_XN_BODY(float, fnc, body)
+
+
+/**
+ * abs
+ */
+extern uint32_t __attribute__((overloadable)) abs(int32_t v) {
+    if (v < 0)
+        return -(uint32_t)v; // negate in unsigned to avoid overflow at INT32_MIN
+    return v;
+}
+extern uint16_t __attribute__((overloadable)) abs(int16_t v) {
+    if (v < 0)
+        return -v;
+    return v;
+}
+extern uint8_t __attribute__((overloadable)) abs(int8_t v) {
+    if (v < 0)
+        return -v;
+    return v;
+}
+
+/**
+ * clz
+ */
+extern uint32_t __attribute__((overloadable)) clz(uint32_t v) {
+    return __builtin_clz(v);
+}
+// __builtin_clz counts within 32 bits, so the narrower overloads subtract
+// the width difference (and go through the unsigned type of the same
+// width) to count leading zeros within their own bit width.
+extern uint16_t __attribute__((overloadable)) clz(uint16_t v) {
+    return (uint16_t)(__builtin_clz(v) - 16);
+}
+extern uint8_t __attribute__((overloadable)) clz(uint8_t v) {
+    return (uint8_t)(__builtin_clz(v) - 24);
+}
+extern int32_t __attribute__((overloadable)) clz(int32_t v) {
+    return (int32_t)__builtin_clz((uint32_t)v);
+}
+extern int16_t __attribute__((overloadable)) clz(int16_t v) {
+    return (int16_t)(__builtin_clz((uint16_t)v) - 16);
+}
+extern int8_t __attribute__((overloadable)) clz(int8_t v) {
+    return (int8_t)(__builtin_clz((uint8_t)v) - 24);
+}
+
+
+UIN_FUNC_IN(abs)
+IN_FUNC_IN(clz)
+
+
+// 6.11.4
+
+
+extern float __attribute__((overloadable)) degrees(float radians) {
+    return radians * (180.f / M_PI);
+}
+extern float2 __attribute__((overloadable)) degrees(float2 radians) {
+    return radians * (180.f / M_PI);
+}
+extern float3 __attribute__((overloadable)) degrees(float3 radians) {
+    return radians * (180.f / M_PI);
+}
+extern float4 __attribute__((overloadable)) degrees(float4 radians) {
+    return radians * (180.f / M_PI);
+}
+
+extern float __attribute__((overloadable)) mix(float start, float stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float2 amount) {
+    return start + (stop - start) * amount;
+}
+extern float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float3 amount) {
+    return start + (stop - start) * amount;
+}
+extern float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float4 amount) {
+    return start + (stop - start) * amount;
+}
+extern float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+
+extern float __attribute__((overloadable)) radians(float degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float2 __attribute__((overloadable)) radians(float2 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float3 __attribute__((overloadable)) radians(float3 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float4 __attribute__((overloadable)) radians(float4 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+
+extern float __attribute__((overloadable)) step(float edge, float v) {
+    return (v < edge) ? 0.f : 1.f;
+}
+extern float2 __attribute__((overloadable)) step(float2 edge, float2 v) {
+    float2 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    return r;
+}
+extern float3 __attribute__((overloadable)) step(float3 edge, float3 v) {
+    float3 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    r.z = (v.z < edge.z) ? 0.f : 1.f;
+    return r;
+}
+extern float4 __attribute__((overloadable)) step(float4 edge, float4 v) {
+    float4 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    r.z = (v.z < edge.z) ? 0.f : 1.f;
+    r.w = (v.w < edge.w) ? 0.f : 1.f;
+    return r;
+}
+extern float2 __attribute__((overloadable)) step(float2 edge, float v) {
+    float2 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    return r;
+}
+extern float3 __attribute__((overloadable)) step(float3 edge, float v) {
+    float3 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    r.z = (v < edge.z) ? 0.f : 1.f;
+    return r;
+}
+extern float4 __attribute__((overloadable)) step(float4 edge, float v) {
+    float4 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    r.z = (v < edge.z) ? 0.f : 1.f;
+    r.w = (v < edge.w) ? 0.f : 1.f;
+    return r;
+}
+
+extern float __attribute__((overloadable)) smoothstep(float, float, float);
+extern float2 __attribute__((overloadable)) smoothstep(float2, float2, float2);
+extern float3 __attribute__((overloadable)) smoothstep(float3, float3, float3);
+extern float4 __attribute__((overloadable)) smoothstep(float4, float4, float4);
+extern float2 __attribute__((overloadable)) smoothstep(float, float, float2);
+extern float3 __attribute__((overloadable)) smoothstep(float, float, float3);
+extern float4 __attribute__((overloadable)) smoothstep(float, float, float4);
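+
+// smoothstep is declared here and implemented elsewhere in the runtime;
+// the conventional definition is t * t * (3.f - 2.f * t) with
+// t = clamp((v - edge0) / (edge1 - edge0), 0.f, 1.f).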
+
+extern float __attribute__((overloadable)) sign(float v) {
+    if (v > 0) return 1.f;
+    if (v < 0) return -1.f;
+    return v;
+}
+FN_FUNC_FN(sign)
+
+
+// 6.11.5
+extern float3 __attribute__((overloadable)) cross(float3 lhs, float3 rhs) {
+    float3 r;
+    r.x = lhs.y * rhs.z  - lhs.z * rhs.y;
+    r.y = lhs.z * rhs.x  - lhs.x * rhs.z;
+    r.z = lhs.x * rhs.y  - lhs.y * rhs.x;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) cross(float4 lhs, float4 rhs) {
+    float4 r;
+    r.x = lhs.y * rhs.z  - lhs.z * rhs.y;
+    r.y = lhs.z * rhs.x  - lhs.x * rhs.z;
+    r.z = lhs.x * rhs.y  - lhs.y * rhs.x;
+    r.w = 0.f;
+    return r;
+}
+
+extern float __attribute__((overloadable)) length(float v);
+extern float __attribute__((overloadable)) length(float2 v);
+extern float __attribute__((overloadable)) length(float3 v);
+extern float __attribute__((overloadable)) length(float4 v);
+
+extern float __attribute__((overloadable)) distance(float lhs, float rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float2 lhs, float2 rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float3 lhs, float3 rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float4 lhs, float4 rhs) {
+    return length(lhs - rhs);
+}
+
+extern float __attribute__((overloadable)) normalize(float v) {
+    return 1.f;
+}
+extern float2 __attribute__((overloadable)) normalize(float2 v) {
+    return v / length(v);
+}
+extern float3 __attribute__((overloadable)) normalize(float3 v) {
+    return v / length(v);
+}
+extern float4 __attribute__((overloadable)) normalize(float4 v) {
+    return v / length(v);
+}
+
+extern float __attribute__((overloadable)) half_sqrt(float);
+
+extern float __attribute__((overloadable)) fast_length(float v) {
+    return v;
+}
+extern float __attribute__((overloadable)) fast_length(float2 v) {
+    return half_sqrt(v.x*v.x + v.y*v.y);
+}
+extern float __attribute__((overloadable)) fast_length(float3 v) {
+    return half_sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float __attribute__((overloadable)) fast_length(float4 v) {
+    return half_sqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
+extern float __attribute__((overloadable)) fast_distance(float lhs, float rhs) {
+    return fast_length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) fast_distance(float2 lhs, float2 rhs) {
+    return fast_length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) fast_distance(float3 lhs, float3 rhs) {
+    return fast_length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) fast_distance(float4 lhs, float4 rhs) {
+    return fast_length(lhs - rhs);
+}
+
+extern float __attribute__((overloadable)) half_rsqrt(float);
+
+extern float __attribute__((overloadable)) fast_normalize(float v) {
+    return 1.f;
+}
+extern float2 __attribute__((overloadable)) fast_normalize(float2 v) {
+    return v * half_rsqrt(v.x*v.x + v.y*v.y);
+}
+extern float3 __attribute__((overloadable)) fast_normalize(float3 v) {
+    return v * half_rsqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float4 __attribute__((overloadable)) fast_normalize(float4 v) {
+    return v * half_rsqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
+extern float __attribute__((overloadable)) half_recip(float);
+
+/*
+extern float __attribute__((overloadable)) approx_atan(float x) {
+    if (x == 0.f)
+        return 0.f;
+    if (x < 0.f)
+        return -1.f * approx_atan(-1.f * x);
+    if (x > 1.f)
+        return M_PI_2 - approx_atan(approx_recip(x));
+    return x * approx_recip(1.f + 0.28f * x*x);
+}
+FN_FUNC_FN(approx_atan)
+*/
+
+typedef union {
+    float fv;
+    int32_t iv;
+} ieee_float_shape_type;
+
+/* Get a 32 bit int from a float.  */
+
+#define GET_FLOAT_WORD(i,d)         \
+do {                                \
+    ieee_float_shape_type gf_u;     \
+    gf_u.fv = (d);                  \
+    (i) = gf_u.iv;                  \
+} while (0)
+
+/* Set a float from a 32 bit int.  */
+
+#define SET_FLOAT_WORD(d,i)         \
+do {                                \
+    ieee_float_shape_type sf_u;     \
+    sf_u.iv = (i);                  \
+    (d) = sf_u.fv;                  \
+} while (0)
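+
+// Usage example: to read the biased exponent of f,
+//   int32_t bits;
+//   GET_FLOAT_WORD(bits, f);
+//   int32_t e = (bits >> 23) & 0xff;  // 127 corresponds to 1.0 <= |f| < 2.0
+// Going through the union avoids the strict-aliasing pitfalls of a plain
+// pointer cast.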
+
+
+
+// Valid -125 to 125
+extern float __attribute__((overloadable)) native_exp2(float v) {
+    int32_t iv = (int)v;
+    int32_t x = iv + (iv >> 31); // ~floor(v)
+    float r = (v - x);
+
+    float fo;
+    SET_FLOAT_WORD(fo, (x + 127) << 23);
+
+    r *= 0.694f; // ~ ln(2)
+    float r2 = r*r;
+    float adj = 1.f + r + (r2 * 0.5f) + (r2*r * 0.166666f) + (r2*r2 * 0.0416666f);
+    return fo * adj;
+}
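+
+// The scalar path splits v into an integer part x (approximately
+// floor(v)) and a fractional remainder r, builds 2^x exactly by writing
+// (x + 127) into the exponent field, and approximates 2^r as e^(r*ln2)
+// with a degree-4 Taylor polynomial (hence the 0.694 ~ ln(2) scale on r).
+// Outside roughly [-125, 125] the exponent field saturates and the
+// result is invalid.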
+
+extern float2 __attribute__((overloadable)) native_exp2(float2 v) {
+    int2 iv = convert_int2(v);
+    int2 x = iv + (iv >> (int2)31); // ~floor(v)
+    float2 r = (v - convert_float2(x));
+
+    x += 127;
+
+    float2 fo = (float2)(x << (int2)23);
+
+    r *= 0.694f; // ~ ln(2)
+    float2 r2 = r*r;
+    float2 adj = 1.f + r + (r2 * 0.5f) + (r2*r * 0.166666f) + (r2*r2 * 0.0416666f);
+    return fo * adj;
+}
+
+extern float4 __attribute__((overloadable)) native_exp2(float4 v) {
+    int4 iv = convert_int4(v);
+    int4 x = iv + (iv >> (int4)31); // ~floor(v)
+    float4 r = (v - convert_float4(x));
+
+    x += 127;
+
+    float4 fo = (float4)(x << (int4)23);
+
+    r *= 0.694f; // ~ ln(2)
+    float4 r2 = r*r;
+    float4 adj = 1.f + r + (r2 * 0.5f) + (r2*r * 0.166666f) + (r2*r2 * 0.0416666f);
+    return fo * adj;
+}
+
+extern float3 __attribute__((overloadable)) native_exp2(float3 v) {
+    float4 t = 1.f;
+    t.xyz = v;
+    return native_exp2(t).xyz;
+}
+
+
+extern float __attribute__((overloadable)) native_exp(float v) {
+    return native_exp2(v * 1.442695041f);
+}
+extern float2 __attribute__((overloadable)) native_exp(float2 v) {
+    return native_exp2(v * 1.442695041f);
+}
+extern float3 __attribute__((overloadable)) native_exp(float3 v) {
+    return native_exp2(v * 1.442695041f);
+}
+extern float4 __attribute__((overloadable)) native_exp(float4 v) {
+    return native_exp2(v * 1.442695041f);
+}
+
+extern float __attribute__((overloadable)) native_exp10(float v) {
+    return native_exp2(v * 3.321928095f);
+}
+extern float2 __attribute__((overloadable)) native_exp10(float2 v) {
+    return native_exp2(v * 3.321928095f);
+}
+extern float3 __attribute__((overloadable)) native_exp10(float3 v) {
+    return native_exp2(v * 3.321928095f);
+}
+extern float4 __attribute__((overloadable)) native_exp10(float4 v) {
+    return native_exp2(v * 3.321928095f);
+}
+
+extern float __attribute__((overloadable)) native_log2(float v) {
+    int32_t ibits;
+    GET_FLOAT_WORD(ibits, v);
+
+    int32_t e = (ibits >> 23) & 0xff;
+
+    ibits &= 0x7fffff;
+    ibits |= 127 << 23;
+
+    float ir;
+    SET_FLOAT_WORD(ir, ibits);
+
+    ir -= 1.5f;
+    float ir2 = ir*ir;
+    float adj2 = 0.405465108f + // -0.00009f +
+                 (0.666666667f * ir) -
+                 (0.222222222f * ir2) +
+                 (0.098765432f * ir*ir2) -
+                 (0.049382716f * ir2*ir2) +
+                 (0.026337449f * ir*ir2*ir2) -
+                 (0.014631916f * ir2*ir2*ir2);
+    adj2 *= (1.f / 0.693147181f);
+
+    return (float)(e - 127) + adj2;
+}
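+
+// The exponent field supplies the integer part of log2(v) directly. The
+// mantissa is renormalized into [1, 2) by forcing the bias to 127, the
+// series above evaluates ln(m) around m = 1.5 (0.405465108 = ln 1.5),
+// and the 1/ln(2) rescale converts that natural log to base 2.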
+extern float2 __attribute__((overloadable)) native_log2(float2 v) {
+    float2 v2 = {native_log2(v.x), native_log2(v.y)};
+    return v2;
+}
+extern float3 __attribute__((overloadable)) native_log2(float3 v) {
+    float3 v2 = {native_log2(v.x), native_log2(v.y), native_log2(v.z)};
+    return v2;
+}
+extern float4 __attribute__((overloadable)) native_log2(float4 v) {
+    float4 v2 = {native_log2(v.x), native_log2(v.y), native_log2(v.z), native_log2(v.w)};
+    return v2;
+}
+
+extern float __attribute__((overloadable)) native_log(float v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+extern float2 __attribute__((overloadable)) native_log(float2 v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+extern float3 __attribute__((overloadable)) native_log(float3 v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+extern float4 __attribute__((overloadable)) native_log(float4 v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+
+extern float __attribute__((overloadable)) native_log10(float v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+extern float2 __attribute__((overloadable)) native_log10(float2 v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+extern float3 __attribute__((overloadable)) native_log10(float3 v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+extern float4 __attribute__((overloadable)) native_log10(float4 v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+
+
+extern float __attribute__((overloadable)) native_powr(float v, float y) {
+    float v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
+extern float2 __attribute__((overloadable)) native_powr(float2 v, float2 y) {
+    float2 v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
+extern float3 __attribute__((overloadable)) native_powr(float3 v, float3 y) {
+    float3 v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
+extern float4 __attribute__((overloadable)) native_powr(float4 v, float4 y) {
+    float4 v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
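+
+// native_powr uses the identity x^y = 2^(y * log2(x)). Clamping log2(x)
+// at -125 keeps bases near zero from pushing the product outside
+// native_exp2's valid range; negative bases are outside powr's domain.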
+
+
+#undef FN_FUNC_FN
+#undef IN_FUNC_FN
+#undef FN_FUNC_FN_FN
+#undef FN_FUNC_FN_F
+#undef FN_FUNC_FN_IN
+#undef FN_FUNC_FN_I
+#undef FN_FUNC_FN_PFN
+#undef FN_FUNC_FN_PIN
+#undef FN_FUNC_FN_FN_FN
+#undef FN_FUNC_FN_FN_PIN
+#undef XN_FUNC_YN
+#undef UIN_FUNC_IN
+#undef IN_FUNC_IN
+#undef XN_FUNC_XN_XN_BODY
+#undef IN_FUNC_IN_IN_BODY
diff --git a/driver/runtime/rs_core.c b/driver/runtime/rs_core.c
new file mode 100644
index 0000000..54fcccb
--- /dev/null
+++ b/driver/runtime/rs_core.c
@@ -0,0 +1,204 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/* Function declarations from libRS */
+extern float4 __attribute__((overloadable)) convert_float4(uchar4 c);
+
+/* Implementation of Core Runtime */
+
+/*
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+{
+    uchar4 c;
+    c.x = (uchar)(r * 255.f + 0.5f);
+    c.y = (uchar)(g * 255.f + 0.5f);
+    c.z = (uchar)(b * 255.f + 0.5f);
+    c.w = 255;
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+{
+    uchar4 c;
+    c.x = (uchar)(r * 255.f + 0.5f);
+    c.y = (uchar)(g * 255.f + 0.5f);
+    c.z = (uchar)(b * 255.f + 0.5f);
+    c.w = (uchar)(a * 255.f + 0.5f);
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    uchar4 c = {color.x, color.y, color.z, 255};
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    uchar4 c = {color.x, color.y, color.z, color.w};
+    return c;
+}
+*/
+
+extern float4 rsUnpackColor8888(uchar4 c)
+{
+    return convert_float4(c) * 0.003921569f;
+}
+
+
+extern int32_t __attribute__((overloadable)) rsAtomicCas(volatile int32_t *ptr, int32_t expectedValue, int32_t newValue) {
+    return __sync_val_compare_and_swap(ptr, expectedValue, newValue);
+}
+
+extern uint32_t __attribute__((overloadable)) rsAtomicCas(volatile uint32_t *ptr, uint32_t expectedValue, uint32_t newValue) {
+    return __sync_val_compare_and_swap((volatile int32_t *)ptr, (int32_t)expectedValue, (int32_t)newValue);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicInc(volatile int32_t *ptr) {
+    return __sync_fetch_and_add(ptr, 1);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicDec(volatile int32_t *ptr) {
+    return __sync_fetch_and_sub(ptr, 1);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicAdd(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_add(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicSub(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_sub(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicAnd(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_and(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicOr(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_or(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicXor(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_xor(ptr, value);
+}
+
+extern uint32_t __attribute__((overloadable)) min(uint32_t, uint32_t);
+extern int32_t __attribute__((overloadable)) min(int32_t, int32_t);
+extern uint32_t __attribute__((overloadable)) max(uint32_t, uint32_t);
+extern int32_t __attribute__((overloadable)) max(int32_t, int32_t);
+
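+// There is no fetch-and-min/max builtin, so min and max are emulated with a
+// compare-and-swap loop: compute the candidate from the freshly observed
+// value and retry until no other thread has raced the update.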
+extern uint32_t __attribute__((overloadable)) rsAtomicMin(volatile uint32_t *ptr, uint32_t value) {
+    uint32_t prev, status;
+    do {
+        prev = *ptr;
+        uint32_t n = min(value, prev);
+        status = rsAtomicCas((volatile int32_t*) ptr, (int32_t) prev, (int32_t)n);
+    } while (status != prev);
+    return prev;
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicMin(volatile int32_t *ptr, int32_t value) {
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        int32_t n = min(value, prev);
+        status = rsAtomicCas(ptr, prev, n);
+    } while (status != prev);
+    return prev;
+}
+
+extern uint32_t __attribute__((overloadable)) rsAtomicMax(volatile uint32_t *ptr, uint32_t value) {
+    uint32_t prev, status;
+    do {
+        prev = *ptr;
+        uint32_t n = max(value, prev);
+        status = rsAtomicCas((volatile int32_t*) ptr, (int32_t) prev, (int32_t) n);
+    } while (status != prev);
+    return prev;
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicMax(volatile int32_t *ptr, int32_t value) {
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        int32_t n = max(value, prev);
+        status = rsAtomicCas(ptr, prev, n);
+    } while (status != prev);
+    return prev;
+}
+
+
+
+extern int32_t rand();
+#define RAND_MAX 0x7fffffff
+
+
+
+extern float __attribute__((overloadable)) rsRand(float min, float max);
+/* {
+    float r = (float)rand();
+    r /= RAND_MAX;
+    r = r * (max - min) + min;
+    return r;
+}
+*/
+
+extern float __attribute__((overloadable)) rsRand(float max) {
+    return rsRand(0.f, max);
+    //float r = (float)rand();
+    //r *= max;
+    //r /= RAND_MAX;
+    //return r;
+}
+
+extern int __attribute__((overloadable)) rsRand(int max) {
+    return (int)rsRand((float)max);
+}
+
+extern int __attribute__((overloadable)) rsRand(int min, int max) {
+    return (int)rsRand((float)min, (float)max);
+}
+
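+// For each vector type T, PRIM_DEBUG declares the by-pointer rsDebug hook
+// resolved by libRS and defines the by-value overload that scripts call,
+// which simply forwards the address of its argument.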
+#define PRIM_DEBUG(T)                               \
+extern void __attribute__((overloadable)) rsDebug(const char *, const T *);     \
+void __attribute__((overloadable)) rsDebug(const char *txt, T val) {            \
+    rsDebug(txt, &val);                                                         \
+}
+
+PRIM_DEBUG(char2)
+PRIM_DEBUG(char3)
+PRIM_DEBUG(char4)
+PRIM_DEBUG(uchar2)
+PRIM_DEBUG(uchar3)
+PRIM_DEBUG(uchar4)
+PRIM_DEBUG(short2)
+PRIM_DEBUG(short3)
+PRIM_DEBUG(short4)
+PRIM_DEBUG(ushort2)
+PRIM_DEBUG(ushort3)
+PRIM_DEBUG(ushort4)
+PRIM_DEBUG(int2)
+PRIM_DEBUG(int3)
+PRIM_DEBUG(int4)
+PRIM_DEBUG(uint2)
+PRIM_DEBUG(uint3)
+PRIM_DEBUG(uint4)
+PRIM_DEBUG(long2)
+PRIM_DEBUG(long3)
+PRIM_DEBUG(long4)
+PRIM_DEBUG(ulong2)
+PRIM_DEBUG(ulong3)
+PRIM_DEBUG(ulong4)
+PRIM_DEBUG(float2)
+PRIM_DEBUG(float3)
+PRIM_DEBUG(float4)
+PRIM_DEBUG(double2)
+PRIM_DEBUG(double3)
+PRIM_DEBUG(double4)
+
+#undef PRIM_DEBUG
+
diff --git a/driver/runtime/rs_element.c b/driver/runtime/rs_element.c
new file mode 100644
index 0000000..4db5883
--- /dev/null
+++ b/driver/runtime/rs_element.c
@@ -0,0 +1,111 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Element
+*/
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementCount(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.fieldsCount;
+}
+
+extern rs_element __attribute__((overloadable))
+        rsElementGetSubElement(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        rs_element nullElem = {0};
+        return nullElem;
+    }
+    rs_element returnElem = {element->mHal.state.fields[index]};
+    return returnElem;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementNameLength(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldNameLengths[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementName(rs_element e, uint32_t index, char *name, uint32_t nameLength) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount ||
+        nameLength == 0 || name == NULL) {
+        return 0;
+    }
+
+    uint32_t numToCopy = element->mHal.state.fieldNameLengths[index];
+    if (nameLength < numToCopy) {
+        numToCopy = nameLength;
+    }
+    // Place the null terminator manually, in case of partial string
+    numToCopy--;
+    name[numToCopy] = '\0';
+    const char *nameSource = element->mHal.state.fieldNames[index];
+    for (uint32_t i = 0; i < numToCopy; i ++) {
+        name[i] = nameSource[i];
+    }
+    return numToCopy;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementArraySize(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldArraySizes[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementOffsetBytes(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldOffsetBytes[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetBytesSize(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.elementSizeBytes;
+}
+
+extern rs_data_type __attribute__((overloadable))
+        rsElementGetDataType(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return RS_TYPE_INVALID;
+    }
+    return element->mHal.state.dataType;
+}
+
+extern rs_data_kind __attribute__((overloadable))
+        rsElementGetDataKind(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return RS_KIND_INVALID;
+    }
+    return element->mHal.state.dataKind;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetVectorSize(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.vectorSize;
+}
diff --git a/driver/runtime/rs_matrix.c b/driver/runtime/rs_matrix.c
new file mode 100644
index 0000000..3afccc1
--- /dev/null
+++ b/driver/runtime/rs_matrix.c
@@ -0,0 +1,314 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/* Function declarations from libRS */
+extern float4 __attribute__((overloadable)) convert_float4(uchar4 c);
+
+/* Implementation of Core Runtime */
+
+
+/////////////////////////////////////////////////////
+// Matrix ops
+/////////////////////////////////////////////////////
+
+
+extern void __attribute__((overloadable))
+rsMatrixLoadIdentity(rs_matrix4x4 *m) {
+    m->m[0] = 1.f;
+    m->m[1] = 0.f;
+    m->m[2] = 0.f;
+    m->m[3] = 0.f;
+    m->m[4] = 0.f;
+    m->m[5] = 1.f;
+    m->m[6] = 0.f;
+    m->m[7] = 0.f;
+    m->m[8] = 0.f;
+    m->m[9] = 0.f;
+    m->m[10] = 1.f;
+    m->m[11] = 0.f;
+    m->m[12] = 0.f;
+    m->m[13] = 0.f;
+    m->m[14] = 0.f;
+    m->m[15] = 1.f;
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoadIdentity(rs_matrix3x3 *m) {
+    m->m[0] = 1.f;
+    m->m[1] = 0.f;
+    m->m[2] = 0.f;
+    m->m[3] = 0.f;
+    m->m[4] = 1.f;
+    m->m[5] = 0.f;
+    m->m[6] = 0.f;
+    m->m[7] = 0.f;
+    m->m[8] = 1.f;
+}
+extern void __attribute__((overloadable))
+rsMatrixLoadIdentity(rs_matrix2x2 *m) {
+    m->m[0] = 1.f;
+    m->m[1] = 0.f;
+    m->m[2] = 0.f;
+    m->m[3] = 1.f;
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const float *f) {
+    m->m[0] = f[0];
+    m->m[1] = f[1];
+    m->m[2] = f[2];
+    m->m[3] = f[3];
+    m->m[4] = f[4];
+    m->m[5] = f[5];
+    m->m[6] = f[6];
+    m->m[7] = f[7];
+    m->m[8] = f[8];
+    m->m[9] = f[9];
+    m->m[10] = f[10];
+    m->m[11] = f[11];
+    m->m[12] = f[12];
+    m->m[13] = f[13];
+    m->m[14] = f[14];
+    m->m[15] = f[15];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix3x3 *m, const float *f) {
+    m->m[0] = f[0];
+    m->m[1] = f[1];
+    m->m[2] = f[2];
+    m->m[3] = f[3];
+    m->m[4] = f[4];
+    m->m[5] = f[5];
+    m->m[6] = f[6];
+    m->m[7] = f[7];
+    m->m[8] = f[8];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix2x2 *m, const float *f) {
+    m->m[0] = f[0];
+    m->m[1] = f[1];
+    m->m[2] = f[2];
+    m->m[3] = f[3];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix4x4 *s) {
+    m->m[0] = s->m[0];
+    m->m[1] = s->m[1];
+    m->m[2] = s->m[2];
+    m->m[3] = s->m[3];
+    m->m[4] = s->m[4];
+    m->m[5] = s->m[5];
+    m->m[6] = s->m[6];
+    m->m[7] = s->m[7];
+    m->m[8] = s->m[8];
+    m->m[9] = s->m[9];
+    m->m[10] = s->m[10];
+    m->m[11] = s->m[11];
+    m->m[12] = s->m[12];
+    m->m[13] = s->m[13];
+    m->m[14] = s->m[14];
+    m->m[15] = s->m[15];
+}
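+// Loading a 3x3 or 2x2 matrix into a 4x4 places it in the upper-left corner
+// and fills the remaining rows and columns from the identity.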
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix3x3 *v) {
+    m->m[0] = v->m[0];
+    m->m[1] = v->m[1];
+    m->m[2] = v->m[2];
+    m->m[3] = 0.f;
+    m->m[4] = v->m[3];
+    m->m[5] = v->m[4];
+    m->m[6] = v->m[5];
+    m->m[7] = 0.f;
+    m->m[8] = v->m[6];
+    m->m[9] = v->m[7];
+    m->m[10] = v->m[8];
+    m->m[11] = 0.f;
+    m->m[12] = 0.f;
+    m->m[13] = 0.f;
+    m->m[14] = 0.f;
+    m->m[15] = 1.f;
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix2x2 *v) {
+    m->m[0] = v->m[0];
+    m->m[1] = v->m[1];
+    m->m[2] = 0.f;
+    m->m[3] = 0.f;
+    m->m[4] = v->m[2];
+    m->m[5] = v->m[3];
+    m->m[6] = 0.f;
+    m->m[7] = 0.f;
+    m->m[8] = 0.f;
+    m->m[9] = 0.f;
+    m->m[10] = 1.f;
+    m->m[11] = 0.f;
+    m->m[12] = 0.f;
+    m->m[13] = 0.f;
+    m->m[14] = 0.f;
+    m->m[15] = 1.f;
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix3x3 *m, const rs_matrix3x3 *s) {
+    m->m[0] = s->m[0];
+    m->m[1] = s->m[1];
+    m->m[2] = s->m[2];
+    m->m[3] = s->m[3];
+    m->m[4] = s->m[4];
+    m->m[5] = s->m[5];
+    m->m[6] = s->m[6];
+    m->m[7] = s->m[7];
+    m->m[8] = s->m[8];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix2x2 *m, const rs_matrix2x2 *s) {
+    m->m[0] = s->m[0];
+    m->m[1] = s->m[1];
+    m->m[2] = s->m[2];
+    m->m[3] = s->m[3];
+}
+
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix4x4 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 4 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix4x4 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 4 + col];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix3x3 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 3 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix3x3 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 3 + col];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix2x2 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 2 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix2x2 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 2 + col];
+}
+
+extern float2 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix2x2 *m, float2 in) {
+    float2 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[2] * in.y);
+    ret.y = (m->m[1] * in.x) + (m->m[3] * in.y);
+    return ret;
+}
+extern float2 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix2x2 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix2x2 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float4 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float3 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *m, float3 in) {
+    return rsMatrixMultiply((const rs_matrix3x3 *)m, in);
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix3x3 *)m, in);
+}
+
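+// rsMatrixLoadMultiply computes ret = lhs * rhs, so ret must not alias
+// either operand; the in-place rsMatrixMultiply overloads below therefore
+// accumulate into a temporary before copying back.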
+extern void __attribute__((overloadable))
+rsMatrixLoadMultiply(rs_matrix4x4 *ret, const rs_matrix4x4 *lhs, const rs_matrix4x4 *rhs) {
+    for (int i=0 ; i<4 ; i++) {
+        float ri0 = 0;
+        float ri1 = 0;
+        float ri2 = 0;
+        float ri3 = 0;
+        for (int j=0 ; j<4 ; j++) {
+            const float rhs_ij = rsMatrixGet(rhs, i, j);
+            ri0 += rsMatrixGet(lhs, j, 0) * rhs_ij;
+            ri1 += rsMatrixGet(lhs, j, 1) * rhs_ij;
+            ri2 += rsMatrixGet(lhs, j, 2) * rhs_ij;
+            ri3 += rsMatrixGet(lhs, j, 3) * rhs_ij;
+        }
+        rsMatrixSet(ret, i, 0, ri0);
+        rsMatrixSet(ret, i, 1, ri1);
+        rsMatrixSet(ret, i, 2, ri2);
+        rsMatrixSet(ret, i, 3, ri3);
+    }
+}
+
+extern void __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *lhs, const rs_matrix4x4 *rhs) {
+    rs_matrix4x4 r;
+    rsMatrixLoadMultiply(&r, lhs, rhs);
+    rsMatrixLoad(lhs, &r);
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoadMultiply(rs_matrix3x3 *ret, const rs_matrix3x3 *lhs, const rs_matrix3x3 *rhs) {
+    for (int i=0 ; i<3 ; i++) {
+        float ri0 = 0;
+        float ri1 = 0;
+        float ri2 = 0;
+        for (int j=0 ; j<3 ; j++) {
+            const float rhs_ij = rsMatrixGet(rhs, i, j);
+            ri0 += rsMatrixGet(lhs, j, 0) * rhs_ij;
+            ri1 += rsMatrixGet(lhs, j, 1) * rhs_ij;
+            ri2 += rsMatrixGet(lhs, j, 2) * rhs_ij;
+        }
+        rsMatrixSet(ret, i, 0, ri0);
+        rsMatrixSet(ret, i, 1, ri1);
+        rsMatrixSet(ret, i, 2, ri2);
+    }
+}
+
+extern void __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *lhs, const rs_matrix3x3 *rhs) {
+    rs_matrix3x3 r;
+    rsMatrixLoadMultiply(&r, lhs, rhs);
+    rsMatrixLoad(lhs, &r);
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoadMultiply(rs_matrix2x2 *ret, const rs_matrix2x2 *lhs, const rs_matrix2x2 *rhs) {
+    for (int i=0 ; i<2 ; i++) {
+        float ri0 = 0;
+        float ri1 = 0;
+        for (int j=0 ; j<2 ; j++) {
+            const float rhs_ij = rsMatrixGet(rhs, i, j);
+            ri0 += rsMatrixGet(lhs, j, 0) * rhs_ij;
+            ri1 += rsMatrixGet(lhs, j, 1) * rhs_ij;
+        }
+        rsMatrixSet(ret, i, 0, ri0);
+        rsMatrixSet(ret, i, 1, ri1);
+    }
+}
+
+extern void __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix2x2 *lhs, const rs_matrix2x2 *rhs) {
+    rs_matrix2x2 r;
+    rsMatrixLoadMultiply(&r, lhs, rhs);
+    rsMatrixLoad(lhs, &r);
+}
+
diff --git a/driver/runtime/rs_mesh.c b/driver/runtime/rs_mesh.c
new file mode 100644
index 0000000..bb533bc
--- /dev/null
+++ b/driver/runtime/rs_mesh.c
@@ -0,0 +1,55 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Mesh
+*/
+extern uint32_t __attribute__((overloadable))
+        rsgMeshGetVertexAllocationCount(rs_mesh m) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL) {
+        return 0;
+    }
+    return mesh->mHal.state.vertexBuffersCount;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsgMeshGetPrimitiveCount(rs_mesh m) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL) {
+        return 0;
+    }
+    return mesh->mHal.state.primitivesCount;
+}
+
+extern rs_allocation __attribute__((overloadable))
+        rsgMeshGetVertexAllocation(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.vertexBuffersCount) {
+        rs_allocation nullAlloc = {0};
+        return nullAlloc;
+    }
+    rs_allocation returnAlloc = {mesh->mHal.state.vertexBuffers[index]};
+    return returnAlloc;
+}
+
+extern rs_allocation __attribute__((overloadable))
+        rsgMeshGetIndexAllocation(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.primitivesCount) {
+        rs_allocation nullAlloc = {0};
+        return nullAlloc;
+    }
+    rs_allocation returnAlloc = {mesh->mHal.state.indexBuffers[index]};
+    return returnAlloc;
+}
+
+extern rs_primitive __attribute__((overloadable))
+        rsgMeshGetPrimitive(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.primitivesCount) {
+        return RS_PRIMITIVE_INVALID;
+    }
+    return mesh->mHal.state.primitives[index];
+}
diff --git a/driver/runtime/rs_program.c b/driver/runtime/rs_program.c
new file mode 100644
index 0000000..64c656f
--- /dev/null
+++ b/driver/runtime/rs_program.c
@@ -0,0 +1,108 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Program Store
+*/
+extern rs_depth_func __attribute__((overloadable))
+        rsgProgramStoreGetDepthFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_DEPTH_FUNC_INVALID;
+    }
+    return prog->mHal.state.depthFunc;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsDepthMaskEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.depthWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskRedEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorRWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskGreenEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorGWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskBlueEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorBWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskAlphaEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorAWriteEnable;
+}
+
+extern rs_blend_src_func __attribute__((overloadable))
+        rsgProgramStoreGetBlendSrcFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_BLEND_SRC_INVALID;
+    }
+    return prog->mHal.state.blendSrc;
+}
+
+extern rs_blend_dst_func __attribute__((overloadable))
+        rsgProgramStoreGetBlendDstFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_BLEND_DST_INVALID;
+    }
+    return prog->mHal.state.blendDst;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsDitherEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.ditherEnable;
+}
+
+/**
+* Program Raster
+*/
+extern bool __attribute__((overloadable))
+        rsgProgramRasterIsPointSpriteEnabled(rs_program_raster pr) {
+    ProgramRaster_t *prog = (ProgramRaster_t *)pr.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.pointSprite;
+}
+
+extern rs_cull_mode __attribute__((overloadable))
+        rsgProgramRasterGetCullMode(rs_program_raster pr) {
+    ProgramRaster_t *prog = (ProgramRaster_t *)pr.p;
+    if (prog == NULL) {
+        return RS_CULL_INVALID;
+    }
+    return prog->mHal.state.cull;
+}
diff --git a/driver/runtime/rs_sample.c b/driver/runtime/rs_sample.c
new file mode 100644
index 0000000..2cd5bdc
--- /dev/null
+++ b/driver/runtime/rs_sample.c
@@ -0,0 +1,654 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+
+// 565 Conversion bits taken from SkBitmap
+#define SK_R16_BITS     5
+#define SK_G16_BITS     6
+#define SK_B16_BITS     5
+
+#define SK_R16_SHIFT    (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT    (SK_B16_BITS)
+#define SK_B16_SHIFT    0
+
+#define SK_R16_MASK     ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK     ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK     ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color)   (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color)   (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color)   (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
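+// Expand a 5- or 6-bit channel to 8 bits by replicating its high bits into
+// the low bits, so full-scale values map to 0xff rather than 0xf8 or 0xfc.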
+static inline unsigned SkR16ToR32(unsigned r) {
+    return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+    return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+    return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c)      SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c)      SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c)      SkB16ToB32(SkGetPackedB16(c))
+
+static float3 getFrom565(uint16_t color) {
+    float3 result;
+    result.x = (float)SkPacked16ToR32(color);
+    result.y = (float)SkPacked16ToG32(color);
+    result.z = (float)SkPacked16ToB32(color);
+    return result;
+}
+
+/**
+* Allocation sampling
+*/
+static inline float __attribute__((overloadable))
+        getElementAt1(const uint8_t *p, int32_t x) {
+    float r = p[x];
+    return r;
+}
+
+static inline float2 __attribute__((overloadable))
+        getElementAt2(const uint8_t *p, int32_t x) {
+    x *= 2;
+    float2 r = {p[x], p[x+1]};
+    return r;
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt3(const uint8_t *p, int32_t x) {
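+    // 3-component elements are stored padded to a 4-byte stride.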
+    x *= 4;
+    float3 r = {p[x], p[x+1], p[x+2]};
+    return r;
+}
+
+static inline float4 __attribute__((overloadable))
+        getElementAt4(const uint8_t *p, int32_t x) {
+    x *= 4;
+    const uchar4 *p2 = (const uchar4 *)&p[x];
+    return convert_float4(p2[0]);
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt565(const uint8_t *p, int32_t x) {
+    x *= 2;
+    float3 r = getFrom565(*((const uint16_t *)&p[x]));
+    return r;
+}
+
+static inline float __attribute__((overloadable))
+        getElementAt1(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    float r = p[x];
+    return r;
+}
+
+static inline float2 __attribute__((overloadable))
+        getElementAt2(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 2;
+    float2 r = {p[x], p[x+1]};
+    return r;
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt3(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 4;
+    float3 r = {p[x], p[x+1], p[x+2]};
+    return r;
+}
+
+static inline float4 __attribute__((overloadable))
+        getElementAt4(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 4;
+    float4 r = {p[x], p[x+1], p[x+2], p[x+3]};
+    return r;
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt565(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 2;
+    float3 r = getFrom565(*((const uint16_t *)&p[x]));
+    return r;
+}
+
+
+
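+// Each getSample_* variant blends two (1D) or four (2D) texels with the
+// given weights and expands the result to a normalized RGBA float4:
+// A fills only alpha, L replicates luminance into RGB, and LA does both.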
+static float4 __attribute__((overloadable))
+            getSample_A(const uint8_t *p, int32_t iPixel,
+                          int32_t next, float w0, float w1) {
+    float p0 = getElementAt1(p, iPixel);
+    float p1 = getElementAt1(p, next);
+    float r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {0.f, 0.f, 0.f, r};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_L(const uint8_t *p, int32_t iPixel,
+                          int32_t next, float w0, float w1) {
+    float p0 = getElementAt1(p, iPixel);
+    float p1 = getElementAt1(p, next);
+    float r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r, r, r, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_LA(const uint8_t *p, int32_t iPixel,
+                           int32_t next, float w0, float w1) {
+    float2 p0 = getElementAt2(p, iPixel);
+    float2 p1 = getElementAt2(p, next);
+    float2 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.x, r.x, r.y};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGB(const uint8_t *p, int32_t iPixel,
+                            int32_t next, float w0, float w1) {
+    float3 p0 = getElementAt3(p, iPixel);
+    float3 p1 = getElementAt3(p, next);
+    float3 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.y, r.z, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_565(const uint8_t *p, int32_t iPixel,
+                           int32_t next, float w0, float w1) {
+    float3 p0 = getElementAt565(p, iPixel);
+    float3 p1 = getElementAt565(p, next);
+    float3 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.y, r.z, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGBA(const uint8_t *p, int32_t iPixel,
+                             int32_t next, float w0, float w1) {
+    float4 p0 = getElementAt4(p, iPixel);
+    float4 p1 = getElementAt4(p, next);
+    float4 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    return r;
+}
+
+
+static float4 __attribute__((overloadable))
+            getSample_A(const uint8_t *p, size_t stride,
+                          int locX, int locY, int nextX, int nextY,
+                          float w0, float w1, float w2, float w3) {
+    float p0 = getElementAt1(p, stride, locX, locY);
+    float p1 = getElementAt1(p, stride, nextX, locY);
+    float p2 = getElementAt1(p, stride, locX, nextY);
+    float p3 = getElementAt1(p, stride, nextX, nextY);
+    float r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {0.f, 0.f, 0.f, r};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_L(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float p0 = getElementAt1(p, stride, locX, locY);
+    float p1 = getElementAt1(p, stride, nextX, locY);
+    float p2 = getElementAt1(p, stride, locX, nextY);
+    float p3 = getElementAt1(p, stride, nextX, nextY);
+    float r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {r, r, r, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_LA(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float2 p0 = getElementAt2(p, stride, locX, locY);
+    float2 p1 = getElementAt2(p, stride, nextX, locY);
+    float2 p2 = getElementAt2(p, stride, locX, nextY);
+    float2 p3 = getElementAt2(p, stride, nextX, nextY);
+    float2 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.x, r.x, r.y};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGB(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
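+    // RGB pixels are padded to 4 bytes, so a 4-wide load is safe here; the
+    // padding lane is discarded when the result is assembled below.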
+    float4 p0 = getElementAt4(p, stride, locX, locY);
+    float4 p1 = getElementAt4(p, stride, nextX, locY);
+    float4 p2 = getElementAt4(p, stride, locX, nextY);
+    float4 p3 = getElementAt4(p, stride, nextX, nextY);
+    float4 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.y, r.z, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGBA(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float4 p0 = getElementAt4(p, stride, locX, locY);
+    float4 p1 = getElementAt4(p, stride, nextX, locY);
+    float4 p2 = getElementAt4(p, stride, locX, nextY);
+    float4 p3 = getElementAt4(p, stride, nextX, nextY);
+    float4 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    return r;
+}
+static float4 __attribute__((overloadable))
+            getSample_565(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float3 p0 = getElementAt565(p, stride, locX, locY);
+    float3 p1 = getElementAt565(p, stride, nextX, locY);
+    float3 p2 = getElementAt565(p, stride, locX, nextY);
+    float3 p3 = getElementAt565(p, stride, nextX, nextY);
+    float3 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret;
+    ret.rgb = r;
+    ret.w = 1.f;
+    return ret;
+}
+
+static float4 __attribute__((overloadable))
+        getBilinearSample1D(const Allocation_t *alloc, float2 weights,
+                          uint32_t iPixel, uint32_t next,
+                          rs_data_kind dk, rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        return getSample_RGBA(p, iPixel, next, weights.x, weights.y);
+    case RS_KIND_PIXEL_A:
+        return getSample_A(p, iPixel, next, weights.x, weights.y);
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            return getSample_565(p, iPixel, next, weights.x, weights.y);
+        }
+        return getSample_RGB(p, iPixel, next, weights.x, weights.y);
+    case RS_KIND_PIXEL_L:
+        return getSample_L(p, iPixel, next, weights.x, weights.y);
+    case RS_KIND_PIXEL_LA:
+        return getSample_LA(p, iPixel, next, weights.x, weights.y);
+
+    default:
+        //__builtin_unreachable();
+        break;
+    }
+
+    //__builtin_unreachable();
+    return 0.f;
+}
+
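+// Maps an integer texel coordinate into [0, size) according to the wrap
+// mode; e.g. with size == 4, RS_SAMPLER_WRAP takes coord -1 to 3, while
+// RS_SAMPLER_MIRRORED_REPEAT reflects coord 5 back to 3. Out-of-range
+// coordinates under any other mode are clamped to the edge.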
+static uint32_t wrapI(rs_sampler_value wrap, int32_t coord, int32_t size) {
+    if (wrap == RS_SAMPLER_WRAP) {
+        coord = coord % size;
+        if (coord < 0) {
+            coord += size;
+        }
+    }
+    if (wrap == RS_SAMPLER_MIRRORED_REPEAT) {
+        coord = coord % (size * 2);
+        if (coord < 0) {
+            coord = (size * 2) + coord;
+        }
+        if (coord >= size) {
+            coord = (size * 2) - coord;
+        }
+    }
+    return (uint32_t)max(0, min(coord, size - 1));
+}
+
+static float4 __attribute__((overloadable))
+        getBilinearSample2D(const Allocation_t *alloc, float w0, float w1, float w2, float w3,
+                          int lx, int ly, int nx, int ny,
+                          rs_data_kind dk, rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+    size_t stride = alloc->mHal.drvState.lod[lod].stride;
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        return getSample_RGBA(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_A:
+        return getSample_A(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_LA:
+        return getSample_LA(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            return getSample_565(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+        }
+        return getSample_RGB(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_L:
+        return getSample_L(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+
+    default:
+        break;
+    }
+
+    return 0.f;
+}
+
+static float4 __attribute__((overloadable))
+        getNearestSample(const Allocation_t *alloc, uint32_t iPixel, rs_data_kind dk,
+                         rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+
+    float4 result = {0.f, 0.f, 0.f, 255.f};
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        result = getElementAt4(p, iPixel);
+        break;
+    case RS_KIND_PIXEL_A:
+        result.w = getElementAt1(p, iPixel);
+        break;
+    case RS_KIND_PIXEL_LA:
+        result.zw = getElementAt2(p, iPixel);
+        result.xy = result.z;
+        break;
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            result.xyz = getElementAt565(p, iPixel);
+        } else {
+            result.xyz = getElementAt3(p, iPixel);
+        }
+        break;
+    case RS_KIND_PIXEL_L:
+        result.xyz = getElementAt1(p, iPixel);
+        break;
+
+    default:
+        //__builtin_unreachable();
+        break;
+    }
+
+    return result * 0.003921569f;
+}
+
+static float4 __attribute__((overloadable))
+        getNearestSample(const Allocation_t *alloc, uint2 iPixel, rs_data_kind dk,
+                         rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+    size_t stride = alloc->mHal.drvState.lod[lod].stride;
+
+    float4 result = {0.f, 0.f, 0.f, 255.f};
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        result = getElementAt4(p, stride, iPixel.x, iPixel.y);
+        break;
+    case RS_KIND_PIXEL_A:
+        result.w = getElementAt1(p, stride, iPixel.x, iPixel.y);
+        break;
+    case RS_KIND_PIXEL_LA:
+        result.zw = getElementAt2(p, stride, iPixel.x, iPixel.y);
+        result.xy = result.z;
+        break;
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            result.xyz = getElementAt565(p, stride, iPixel.x, iPixel.y);
+        } else {
+            result.xyz = getElementAt3(p, stride, iPixel.x, iPixel.y);
+        }
+        break;
+
+    default:
+        //__builtin_unreachable();
+        break;
+    }
+
+    return result * 0.003921569f;
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_LinearPixel(const Allocation_t *alloc,
+                               rs_data_kind dk, rs_data_type dt,
+                               rs_sampler_value wrapS,
+                               float uv, uint32_t lod) {
+
+    int32_t sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    float pixelUV = uv * (float)(sourceW);
+    int32_t iPixel = (int32_t)(pixelUV);
+    float frac = pixelUV - (float)iPixel;
+
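+    // Shift by half a texel so the two taps bracket the sample point.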
+    if (frac < 0.5f) {
+        iPixel -= 1;
+        frac += 0.5f;
+    } else {
+        frac -= 0.5f;
+    }
+
+    float oneMinusFrac = 1.0f - frac;
+
+    float2 weights;
+    weights.x = oneMinusFrac;
+    weights.y = frac;
+
+    uint32_t next = wrapI(wrapS, iPixel + 1, sourceW);
+    uint32_t location = wrapI(wrapS, iPixel, sourceW);
+
+    return getBilinearSample1D(alloc, weights, location, next, dk, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_NearestPixel(const Allocation_t *alloc,
+                                rs_data_kind dk, rs_data_type dt,
+                                rs_sampler_value wrapS,
+                                float uv, uint32_t lod) {
+
+    int32_t sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    int32_t iPixel = (int32_t)(uv * (float)(sourceW));
+    uint32_t location = wrapI(wrapS, iPixel, sourceW);
+
+    return getNearestSample(alloc, location, dk, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_LinearPixel(const Allocation_t *alloc,
+                               rs_data_kind dk, rs_data_type dt,
+                               rs_sampler_value wrapS,
+                               rs_sampler_value wrapT,
+                               float2 uv, uint32_t lod) {
+
+    int sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    int sourceH = alloc->mHal.drvState.lod[lod].dimY;
+
+    float pixelU = uv.x * sourceW;
+    float pixelV = uv.y * sourceH;
+    int iPixelU = pixelU;
+    int iPixelV = pixelV;
+    float fracU = pixelU - iPixelU;
+    float fracV = pixelV - iPixelV;
+
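+    // Samples sit at texel centers: shift by half a texel so the four taps
+    // bracket the sample point, then blend with the bilinear area weights
+    // computed below.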
+    if (fracU < 0.5f) {
+        iPixelU -= 1;
+        fracU += 0.5f;
+    } else {
+        fracU -= 0.5f;
+    }
+    if (fracV < 0.5f) {
+        iPixelV -= 1;
+        fracV += 0.5f;
+    } else {
+        fracV -= 0.5f;
+    }
+    float oneMinusFracU = 1.0f - fracU;
+    float oneMinusFracV = 1.0f - fracV;
+
+    float w0 = oneMinusFracU * oneMinusFracV;
+    float w1 = fracU * oneMinusFracV;
+    float w2 = oneMinusFracU * fracV;
+    float w3 = fracU * fracV;
+
+    int nx = wrapI(wrapS, iPixelU + 1, sourceW);
+    int ny = wrapI(wrapT, iPixelV + 1, sourceH);
+    int lx = wrapI(wrapS, iPixelU, sourceW);
+    int ly = wrapI(wrapT, iPixelV, sourceH);
+
+    return getBilinearSample2D(alloc, w0, w1, w2, w3, lx, ly, nx, ny, dk, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_NearestPixel(const Allocation_t *alloc,
+                                rs_data_kind dk, rs_data_type dt,
+                                rs_sampler_value wrapS,
+                                rs_sampler_value wrapT,
+                                float2 uv, uint32_t lod) {
+    int sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    int sourceH = alloc->mHal.drvState.lod[lod].dimY;
+
+    float2 dimF;
+    dimF.x = (float)(sourceW);
+    dimF.y = (float)(sourceH);
+    int2 iPixel = convert_int2(uv * dimF);
+
+    uint2 location;
+    location.x = wrapI(wrapS, iPixel.x, sourceW);
+    location.y = wrapI(wrapT, iPixel.y, sourceH);
+    return getNearestSample(alloc, location, dk, dt, lod);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float uv, float lod) {
+
+    const Allocation_t *alloc = (const Allocation_t *)a.p;
+    const Sampler_t *prog = (Sampler_t *)s.p;
+    const Type_t *type = (Type_t *)alloc->mHal.state.type;
+    const Element_t *elem = type->mHal.state.element;
+    rs_data_kind dk = elem->mHal.state.dataKind;
+    rs_data_type dt = elem->mHal.state.dataType;
+    rs_sampler_value sampleMin = prog->mHal.state.minFilter;
+    rs_sampler_value sampleMag = prog->mHal.state.magFilter;
+    rs_sampler_value wrapS = prog->mHal.state.wrapS;
+
+    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE)) {
+        return 0.f;
+    }
+
+    if (lod <= 0.0f) {
+        if (sampleMag == RS_SAMPLER_NEAREST) {
+            return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, uv, 0);
+        }
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, 0);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_NEAREST) {
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod = min(lod, (float)maxLOD);
+        uint32_t nearestLOD = (uint32_t)round(lod);
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, nearestLOD);
+    }
+
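+    // Trilinear filtering: sample the two nearest mip levels and blend by
+    // the fractional part of the requested LOD.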
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_LINEAR) {
+        uint32_t lod0 = (uint32_t)floor(lod);
+        uint32_t lod1 = (uint32_t)ceil(lod);
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod0 = min(lod0, maxLOD);
+        lod1 = min(lod1, maxLOD);
+        float4 sample0 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, lod0);
+        float4 sample1 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, lod1);
+        float frac = lod - (float)lod0;
+        return sample0 * (1.0f - frac) + sample1 * frac;
+    }
+
+    return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, uv, 0);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float location) {
+    return rsSample(a, s, location, 0);
+}
+
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float2 uv, float lod) {
+
+    const Allocation_t *alloc = (const Allocation_t *)a.p;
+    const Sampler_t *prog = (Sampler_t *)s.p;
+    const Type_t *type = (Type_t *)alloc->mHal.state.type;
+    const Element_t *elem = type->mHal.state.element;
+    rs_data_kind dk = elem->mHal.state.dataKind;
+    rs_data_type dt = elem->mHal.state.dataType;
+    rs_sampler_value sampleMin = prog->mHal.state.minFilter;
+    rs_sampler_value sampleMag = prog->mHal.state.magFilter;
+    rs_sampler_value wrapS = prog->mHal.state.wrapS;
+    rs_sampler_value wrapT = prog->mHal.state.wrapT;
+
+    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE)) {
+        return 0.f;
+    }
+
+    if (lod <= 0.0f) {
+        if (sampleMag == RS_SAMPLER_NEAREST) {
+            return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+        }
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_NEAREST) {
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod = min(lod, (float)maxLOD);
+        uint32_t nearestLOD = (uint32_t)round(lod);
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, nearestLOD);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_LINEAR) {
+        uint32_t lod0 = (uint32_t)floor(lod);
+        uint32_t lod1 = (uint32_t)ceil(lod);
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod0 = min(lod0, maxLOD);
+        lod1 = min(lod1, maxLOD);
+        float4 sample0 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, lod0);
+        float4 sample1 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, lod1);
+        float frac = lod - (float)lod0;
+        return sample0 * (1.0f - frac) + sample1 * frac;
+    }
+
+    return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float2 uv) {
+
+    const Allocation_t *alloc = (const Allocation_t *)a.p;
+    const Sampler_t *prog = (Sampler_t *)s.p;
+    const Type_t *type = (Type_t *)alloc->mHal.state.type;
+    const Element_t *elem = type->mHal.state.element;
+    rs_data_kind dk = elem->mHal.state.dataKind;
+    rs_data_type dt = elem->mHal.state.dataType;
+    rs_sampler_value wrapS = prog->mHal.state.wrapS;
+    rs_sampler_value wrapT = prog->mHal.state.wrapT;
+
+    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE)) {
+        return 0.f;
+    }
+
+    if (prog->mHal.state.magFilter == RS_SAMPLER_NEAREST) {
+        return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+    }
+    return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+}
+
diff --git a/driver/runtime/rs_sampler.c b/driver/runtime/rs_sampler.c
new file mode 100644
index 0000000..39782de
--- /dev/null
+++ b/driver/runtime/rs_sampler.c
@@ -0,0 +1,51 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Sampler
+*/
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetMinification(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.minFilter;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetMagnification(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.magFilter;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetWrapS(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.wrapS;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetWrapT(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.wrapT;
+}
+
+extern float __attribute__((overloadable))
+        rsSamplerGetAnisotropy(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return 0.0f;
+    }
+    return prog->mHal.state.aniso;
+}
diff --git a/driver/runtime/rs_structs.h b/driver/runtime/rs_structs.h
new file mode 100644
index 0000000..6db4279
--- /dev/null
+++ b/driver/runtime/rs_structs.h
@@ -0,0 +1,262 @@
+#ifndef _RS_STRUCTS_H_
+#define _RS_STRUCTS_H_
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Allocation owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsAllocation.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsAllocationGetDimX(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * allocations.
+ *
+ *****************************************************************************/
+typedef enum {
+    RS_ALLOCATION_MIPMAP_NONE = 0,
+    RS_ALLOCATION_MIPMAP_FULL = 1,
+    RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE = 2
+} rs_allocation_mipmap_control;
+
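+// In each mirrored struct below, the leading __pad skips the C++ object
+// header (vtable pointer and base-class fields) that this code never
+// touches; only the mHal block is read.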
+typedef struct Allocation {
+    char __pad[32];
+    struct {
+        void * drv;
+        struct {
+            const void *type;
+            uint32_t usageFlags;
+            rs_allocation_mipmap_control mipmapControl;
+            uint32_t yuv;
+            uint32_t elementSizeBytes;
+            bool hasMipmaps;
+            bool hasFaces;
+            bool hasReferences;
+            void * usrPtr;
+            int32_t surfaceTextureID;
+            void * wndSurface;
+            void * surfaceTexture;
+        } state;
+
+        struct DrvState {
+            struct LodState {
+                void * mallocPtr;
+                size_t stride;
+                uint32_t dimX;
+                uint32_t dimY;
+                uint32_t dimZ;
+            } lod[16/*android::renderscript::Allocation::MAX_LOD*/];
+            size_t faceOffset;
+            uint32_t lodCount;
+            uint32_t faceCount;
+        } drvState;
+    } mHal;
+} Allocation_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class ProgramStore owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsProgramStore.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgProgramStoreGetDepthFunc(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * program store.
+ *
+ *****************************************************************************/
+typedef struct ProgramStore {
+    char __pad[40];
+    struct {
+        struct {
+            bool ditherEnable;
+            bool colorRWriteEnable;
+            bool colorGWriteEnable;
+            bool colorBWriteEnable;
+            bool colorAWriteEnable;
+            rs_blend_src_func blendSrc;
+            rs_blend_dst_func blendDst;
+            bool depthWriteEnable;
+            rs_depth_func depthFunc;
+        } state;
+    } mHal;
+} ProgramStore_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class ProgramRaster owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsProgramRaster.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgProgramRasterGetCullMode(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * program raster.
+ *
+ *****************************************************************************/
+typedef struct ProgramRaster {
+    char __pad[36];
+    struct {
+        void * drv;
+        struct {
+            bool pointSprite;
+            rs_cull_mode cull;
+        } state;
+    } mHal;
+} ProgramRaster_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Sampler owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsSampler.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsSamplerGetMagnification(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * samplers.
+ *
+ *****************************************************************************/
+typedef struct Sampler {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            rs_sampler_value magFilter;
+            rs_sampler_value minFilter;
+            rs_sampler_value wrapS;
+            rs_sampler_value wrapT;
+            rs_sampler_value wrapR;
+            float aniso;
+        } state;
+    } mHal;
+} Sampler_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Element owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsElement.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsElementGetSubElementCount(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * elements.
+ *
+ *****************************************************************************/
+typedef struct Element {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            rs_data_type dataType;
+            rs_data_kind dataKind;
+            uint32_t vectorSize;
+            uint32_t elementSizeBytes;
+
+            // Subelements
+            const void **fields;
+            uint32_t *fieldArraySizes;
+            const char **fieldNames;
+            uint32_t *fieldNameLengths;
+            uint32_t *fieldOffsetBytes;
+            uint32_t fieldsCount;
+        } state;
+    } mHal;
+} Element_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Type owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsType.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsAllocationGetElement(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * types.
+ *
+ *****************************************************************************/
+typedef struct Type {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            const void * element;
+            uint32_t dimX;
+            uint32_t dimY;
+            uint32_t dimZ;
+            uint32_t *lodDimX;
+            uint32_t *lodDimY;
+            uint32_t *lodDimZ;
+            uint32_t *lodOffset;
+            uint32_t lodCount;
+            bool faces;
+        } state;
+    } mHal;
+} Type_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Mesh owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsMesh.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgMeshGetVertexAllocationCount(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * meshes.
+ *
+ *****************************************************************************/
+typedef struct Mesh {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            void **vertexBuffers;
+            uint32_t vertexBuffersCount;
+
+            // indexBuffers[i] could be NULL, in which case only primitives[i] is used
+            void **indexBuffers;
+            uint32_t indexBuffersCount;
+            rs_primitive *primitives;
+            uint32_t primitivesCount;
+        } state;
+    } mHal;
+} Mesh_t;
+#endif // _RS_STRUCTS_H_