am dfa409d3: (-s ours) Reconcile with jb-mr2-release - do not merge

* commit 'dfa409d3a8f46fd11340f62fb73487e04e6698e7':
  Fix crash in Vine
diff --git a/Android.mk b/Android.mk
index 42a018e..bef48c0 100644
--- a/Android.mk
+++ b/Android.mk
@@ -311,142 +311,4 @@
 include $(LLVM_ROOT_PATH)/llvm-host-build.mk
 include $(BUILD_HOST_STATIC_LIBRARY)
 
-
-#=============================================================================
-# librsloader-test (Device)
-#-----------------------------------------------------------------------------
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := test-librsloader
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SHARED_LIBRARIES := \
-  libstlport
-
-LOCAL_STATIC_LIBRARIES := \
-  librsloader \
-  libcutils \
-  liblog \
-  libLLVMSupport
-
-LOCAL_SRC_FILES := \
-  driver/linkloader/android/test-librsloader.c
-
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_CFLAGS += $(rs_base_CFLAGS)
-
-LOCAL_C_INCLUDES := \
-  $(LOCAL_PATH)/driver/linkloader \
-  $(LOCAL_PATH)/driver/linkloader/include
-
-include $(LLVM_ROOT_PATH)/llvm-device-build.mk
-include $(BUILD_EXECUTABLE)
-
-
-#=============================================================================
-# librsloader-test (Host)
-#-----------------------------------------------------------------------------
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := test-librsloader
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_LDLIBS := \
-  -lpthread \
-  -ldl
-
-LOCAL_STATIC_LIBRARIES := \
-  librsloader \
-  libcutils \
-  liblog \
-  libLLVMSupport
-
-LOCAL_SRC_FILES := \
-  driver/linkloader/android/test-librsloader.c
-
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_CFLAGS += $(rs_base_CFLAGS)
-
-LOCAL_C_INCLUDES := \
-  $(LOCAL_PATH)/driver/linkloader \
-  $(LOCAL_PATH)/driver/linkloader/include
-
-include $(LLVM_ROOT_PATH)/llvm-host-build.mk
-include $(BUILD_HOST_EXECUTABLE)
-
-
-#=============================================================================
-# rsloader
-#-----------------------------------------------------------------------------
-
-ifdef BUILD_RSLOADER_TOOL
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := rsloader
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SHARED_LIBRARIES := \
-  libstlport
-
-LOCAL_STATIC_LIBRARIES := \
-  libLLVMSupport
-
-LOCAL_SRC_FILES := \
-  driver/linkloader/lib/ELFHeader.cpp \
-  driver/linkloader/lib/ELFSymbol.cpp \
-  driver/linkloader/lib/ELFSectionHeader.cpp \
-  driver/linkloader/lib/ELFTypes.cpp \
-  driver/linkloader/lib/StubLayout.cpp \
-  driver/linkloader/utils/raw_ostream.cpp \
-  driver/linkloader/utils/rsl_assert.cpp \
-  driver/linkloader/utils/helper.cpp \
-  driver/linkloader/main.cpp
-
-LOCAL_C_INCLUDES := \
-  $(LOCAL_PATH)/driver/linkloader \
-  $(LOCAL_PATH)/driver/linkloader/include \
-  $(LOCAL_C_INCLUDES)
-
-include $(LLVM_ROOT_PATH)/llvm-device-build.mk
-include $(BUILD_EXECUTABLE)
-endif
-
-
-#=============================================================================
-# stub-layout-unit-test
-#-----------------------------------------------------------------------------
-
-ifdef BUILD_STUB_LAYOUT_TEST
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := stub-layout-unit-test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SHARED_LIBRARIES := \
-  libstlport
-
-LOCAL_SRC_FILES := \
-  driver/linkloader/lib/StubLayout.cpp \
-  driver/linkloader/utils/raw_ostream.cpp \
-  driver/linkloader/utils/helper.cpp \
-  driver/linkloader/tests/stub-test.cpp
-
-LOCAL_C_INCLUDES := \
-  $(LOCAL_PATH)/driver/linkloader \
-  $(LOCAL_PATH)/driver/linkloader/include \
-  $(LOCAL_C_INCLUDES)
-
-include $(LLVM_ROOT_PATH)/llvm-device-build.mk
-include $(BUILD_EXECUTABLE)
-endif
-
-
 include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/cpu_ref/rsCpuCore.cpp b/cpu_ref/rsCpuCore.cpp
index afa9c57..3e8f45b 100644
--- a/cpu_ref/rsCpuCore.cpp
+++ b/cpu_ref/rsCpuCore.cpp
@@ -108,6 +108,7 @@
 #ifndef RS_COMPATIBILITY_LIB
     mLinkRuntimeCallback = NULL;
     mSelectRTCallback = NULL;
+    mSetupCompilerCallback = NULL;
 #endif
 }
 
diff --git a/cpu_ref/rsCpuCore.h b/cpu_ref/rsCpuCore.h
index c6704fb..75a68ec 100644
--- a/cpu_ref/rsCpuCore.h
+++ b/cpu_ref/rsCpuCore.h
@@ -125,6 +125,14 @@
     RSSelectRTCallback getSelectRTCallback() {
         return mSelectRTCallback;
     }
+
+    virtual void setSetupCompilerCallback(
+            RSSetupCompilerCallback pSetupCompilerCallback) {
+        mSetupCompilerCallback = pSetupCompilerCallback;
+    }
+    virtual RSSetupCompilerCallback getSetupCompilerCallback() const {
+        return mSetupCompilerCallback;
+    }
 #endif
     virtual bool getInForEach() { return mInForEach; }
 
@@ -156,6 +164,7 @@
 #ifndef RS_COMPATIBILITY_LIB
     bcc::RSLinkRuntimeCallback mLinkRuntimeCallback;
     RSSelectRTCallback mSelectRTCallback;
+    RSSetupCompilerCallback mSetupCompilerCallback;
 #endif
 };
 
diff --git a/cpu_ref/rsCpuScript.cpp b/cpu_ref/rsCpuScript.cpp
index d9e0044..4ba60d3 100644
--- a/cpu_ref/rsCpuScript.cpp
+++ b/cpu_ref/rsCpuScript.cpp
@@ -38,8 +38,35 @@
     #include <bcc/Renderscript/RSCompilerDriver.h>
     #include <bcc/Renderscript/RSExecutable.h>
     #include <bcc/Renderscript/RSInfo.h>
+    #include <cutils/properties.h>
 #endif
 
+#ifndef RS_COMPATIBILITY_LIB
+namespace {
+static bool is_force_recompile() {
+#ifdef RS_SERVER
+  return false;
+#else
+  char buf[PROPERTY_VALUE_MAX];
+
+  // Re-compile if floating point precision has been overridden.
+  property_get("debug.rs.precision", buf, "");
+  if (buf[0] != '\0') {
+    return true;
+  }
+
+  // Re-compile if debug.rs.forcerecompile is set.
+  property_get("debug.rs.forcerecompile", buf, "0");
+  if ((::strcmp(buf, "1") == 0) || (::strcmp(buf, "true") == 0)) {
+    return true;
+  } else {
+    return false;
+  }
+#endif  // RS_SERVER
+}
+}  // namespace
+#endif  // !defined(RS_COMPATIBILITY_LIB)
+
 namespace android {
 namespace renderscript {
 
@@ -121,7 +148,7 @@
     mCtx->lockMutex();
 
 #ifndef RS_COMPATIBILITY_LIB
-    bcc::RSExecutable *exec;
+    bcc::RSExecutable *exec = NULL;
 
     mCompilerContext = NULL;
     mCompilerDriver = NULL;
@@ -144,6 +171,13 @@
     mCompilerDriver->setRSRuntimeLookupFunction(lookupRuntimeStub);
     mCompilerDriver->setRSRuntimeLookupContext(this);
 
+    // Run any compiler setup functions we have been provided with.
+    RSSetupCompilerCallback setupCompilerCallback =
+            mCtx->getSetupCompilerCallback();
+    if (setupCompilerCallback != NULL) {
+        setupCompilerCallback(mCompilerDriver);
+    }
+
     const char *core_lib = NULL;
     RSSelectRTCallback selectRTCallback = mCtx->getSelectRTCallback();
     if (selectRTCallback != NULL) {
@@ -154,10 +188,26 @@
         // Use the libclcore_debug.bc instead of the default library.
         core_lib = bcc::RSInfo::LibCLCoreDebugPath;
         mCompilerDriver->setDebugContext(true);
+        // Skip the cache lookup
+    } else if (!is_force_recompile()) {
+        // Attempt to just load the script from cache first if we can.
+        exec = mCompilerDriver->loadScript(cacheDir, resName,
+                                           (const char *)bitcode, bitcodeSize);
     }
-    exec = mCompilerDriver->build(*mCompilerContext, cacheDir, resName,
-                                  (const char *)bitcode, bitcodeSize, core_lib,
-                                  mCtx->getLinkRuntimeCallback());
+
+    // TODO(srhines): This is being refactored, but it simply wraps the
+    // build (compile) and load steps together.
+    if (exec == NULL) {
+        bool built = mCompilerDriver->build(*mCompilerContext, cacheDir,
+                                            resName, (const char *)bitcode,
+                                            bitcodeSize, core_lib,
+                                            mCtx->getLinkRuntimeCallback());
+        if (built) {
+            exec = mCompilerDriver->loadScript(cacheDir, resName,
+                                               (const char *)bitcode,
+                                               bitcodeSize);
+        }
+    }
 
     if (exec == NULL) {
         ALOGE("bcc: FAILS to prepare executable for '%s'", resName);
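
For reference, a minimal standalone sketch of the flow the hunk above introduces: skip the cache when a debug context or the debug.rs.precision / debug.rs.forcerecompile properties force a recompile, otherwise try to load the cached object first, and fall back to build-then-load on a miss. All names below are hypothetical stand-ins, not the real bcc::RSCompilerDriver API.

    /* Sketch only: load_from_cache()/build() stand in for
     * bcc::RSCompilerDriver::loadScript()/build(). */
    #include <stdbool.h>
    #include <stdio.h>

    static bool cache_valid = false;                         /* pretend cache state   */
    static bool is_force_recompile(void) { return false; }   /* property checks above */
    static const char *load_from_cache(void) { return cache_valid ? "cached" : NULL; }
    static bool build(void) { cache_valid = true; return true; }

    static const char *prepare(bool debugContext) {
        const char *exec = NULL;
        if (!debugContext && !is_force_recompile()) {
            exec = load_from_cache();          /* fast path: reuse the cached object */
        }
        if (exec == NULL && build()) {         /* cache miss or forced recompile     */
            exec = load_from_cache();          /* load what was just built           */
        }
        return exec;                           /* NULL => cache lookup and build both failed */
    }

    int main(void) {
        printf("first run:  %s\n", prepare(false));   /* builds, then loads */
        printf("second run: %s\n", prepare(false));   /* cache hit          */
        return 0;
    }
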
diff --git a/cpu_ref/rsd_cpu.h b/cpu_ref/rsd_cpu.h
index d5642aa..d81a145 100644
--- a/cpu_ref/rsd_cpu.h
+++ b/cpu_ref/rsd_cpu.h
@@ -28,12 +28,16 @@
 
 namespace bcc {
 
+class RSCompilerDriver;
 class RSScript;
-typedef llvm::Module* (*RSLinkRuntimeCallback) (bcc::RSScript *, llvm::Module *, llvm::Module *);
+typedef llvm::Module* (*RSLinkRuntimeCallback)
+        (bcc::RSScript *, llvm::Module *, llvm::Module *);
 
 }  // end namespace bcc;
 
 typedef const char* (*RSSelectRTCallback) (const char*, size_t);
+
+typedef void (*RSSetupCompilerCallback) (bcc::RSCompilerDriver *);
 #endif
 
 namespace android {
@@ -119,6 +123,11 @@
     virtual CpuScriptGroup * createScriptGroup(const ScriptGroup *sg) = 0;
     virtual bool getInForEach() = 0;
 
+#ifndef RS_COMPATIBILITY_LIB
+    virtual void setSetupCompilerCallback(
+            RSSetupCompilerCallback pSetupCompilerCallback) = 0;
+    virtual RSSetupCompilerCallback getSetupCompilerCallback() const = 0;
+#endif
 };
 
 
diff --git a/driver/Android.mk b/driver/Android.mk
new file mode 100644
index 0000000..271de29
--- /dev/null
+++ b/driver/Android.mk
@@ -0,0 +1,4 @@
+
+LOCAL_PATH:=$(call my-dir)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/driver/linkloader/tests/images/gen-testcases.sh b/driver/linkloader/tests/images/gen-testcases.sh
deleted file mode 100755
index 26c7cdf..0000000
--- a/driver/linkloader/tests/images/gen-testcases.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash -e
-
-
-# Copyright (C) 2011-2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CYAN='\033[1;36m'
-RESET='\033[m'
-
-echo -e "${CYAN}Generating bitcode ...${RESET}"
-clang -emit-llvm -std=c89 -Wall -c test.c -o test.bc
-clang -emit-llvm -std=c89 -Wall -c simple-test.c -o simple-test.bc
-clang -emit-llvm -std=c89 -Wall -c rodata-test.c -o rodata-test.bc
-
-function gen_test_cases {
-  echo -e "${CYAN}Generating for $1 ...${RESET}"
-  llc -filetype=obj -relocation-model=static -mtriple $2 $3 test.bc -o test-$1.o
-  llc -filetype=obj -relocation-model=static -mtriple $2 $3 simple-test.bc -o simple-test-$1.o
-  llc -filetype=obj -relocation-model=static -mtriple $2 $3 rodata-test.bc -o rodata-test-$1.o
-}
-
-gen_test_cases arm    armv7-none-linux-gnueabi
-gen_test_cases tegra2 armv7-none-linux-gnueabi '-mcpu=cortex-a9 -mattr=+vfp3'
-gen_test_cases thumb2 thumb-none-linux-gnueabi '-march=thumb -mattr=+thumb2'
-gen_test_cases thumb2lc thumb-none-linux-gnueabi '-mattr=+thumb2,+neonfp,+vfp3 -arm-long-calls'
-gen_test_cases thumb2lc-xoom thumb-none-linux-gnueabi '-mattr=+thumb2 -arm-long-calls'
-gen_test_cases x86_32 i686-none-linux
-gen_test_cases x86_64 x86_64-none-linux
-gen_test_cases mipsel mipsel-none-linux-gnueabi
diff --git a/driver/linkloader/tests/images/rodata-test.c b/driver/linkloader/tests/images/rodata-test.c
deleted file mode 100644
index d695d01..0000000
--- a/driver/linkloader/tests/images/rodata-test.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2011, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include<stdio.h>
-
-static char const *const test_str[] = {
-  "string 1",
-  "string 2",
-  "string 3",
-  "long long long long long long string"
-};
-
-static size_t test_str_count = sizeof(test_str) / sizeof(char const *const);
-
-int main(){
-  int i;
-  printf("test_str: %p\n", &test_str);
-  for (i = 0; i < test_str_count; ++i) {
-    printf("%p\n", test_str[i]);
-    printf("%s\n", test_str[i]);
-  }
-
-  return 0;
-}
diff --git a/driver/linkloader/tests/images/simple-test.c b/driver/linkloader/tests/images/simple-test.c
deleted file mode 100644
index c59201d..0000000
--- a/driver/linkloader/tests/images/simple-test.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2011, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <time.h>
-
-int main() {
-  srand(time(NULL));
-
-  unsigned int ans = rand() % 100;
-  unsigned int user = 100;
-  unsigned int left = 0;
-  unsigned int right = 99;
-
-  printf("Hello, droid!  Let's play a number guessing game!\n");
-
-  while (user != ans) {
-    printf("Please input a number [%d-%d]:\n", left, right);
-
-    if (scanf("%u", &user) != 1) {
-      break;
-    }
-
-    if (user < left || user > right) {
-      /* Out of range, ignore this answer. */
-      continue;
-    } else if (user == ans) {
-      printf("You got it!\n");
-      break;
-    } else if (user < ans) {
-      left = user;
-    } else {
-      right = user;
-    }
-  }
-
-  return 0;
-}
diff --git a/driver/linkloader/tests/images/test.c b/driver/linkloader/tests/images/test.c
deleted file mode 100644
index 01b69a1..0000000
--- a/driver/linkloader/tests/images/test.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2011, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include<stdio.h>
-static void hello_function(const char *ptr){
-        printf("%s", ptr);
-}
-int my_add(int para_x, int para_y){
-        return para_x + para_y;
-}
-int global_z_i;
-double global_z_d;
-int global_big_z_i[1000];
-double global_big_z_d[1000];
-static int global_static_z_i;
-static double global_static_z_d;
-static int global_static_big_z_i[1000];
-static double global_static_big_z_d[1000];
-int global_z_i_init = 1;
-double global_z_d_init = 1.1;
-/*extern int extern_z_i;   */
-/*extern double extern_z_d;*/
-int main(){
-        static int local_static_z_i;
-        static double local_static_z_d;
-        static int local_static_z_i_init = 2;
-        static double local_static_z_d_init = 2.2;
-        local_static_z_i = local_static_z_i_init;
-        local_static_z_d = local_static_z_d_init;
-        printf("%d %f\n", local_static_z_i, local_static_z_d);
-        printf("%d %f\n", local_static_z_i_init, local_static_z_d_init);
-        hello_function("Hello world!1\n");
-        hello_function("Hello world!2\n");
-        hello_function("Hello world!3\n");
-        global_z_i = my_add(1,2);
-        global_z_d = 3.3;
-        printf("%d %f\n", global_z_i, global_z_d);
-        global_big_z_i[100] = 4;
-        global_big_z_d[100] = 4.4;
-        printf("%d %f\n", global_big_z_i[100], global_big_z_d[100]);
-        global_static_z_i = my_add(2,1);
-        global_static_z_d = 3.3;
-        printf("%d %f\n", global_static_z_i, global_static_z_d);
-        int local_z_i = global_static_z_i = global_z_i;
-        double local_z_d = global_static_z_d = global_z_d;
-        printf("%d %f\n", local_z_i, local_z_d);
-        global_static_big_z_i[500] = 5;
-        global_static_big_z_d[500] = 5.5;
-        printf("%d %f\n", global_static_big_z_i[500], global_static_big_z_d[500]);
-        global_z_i_init = 6;
-        global_z_d_init = 6.6;
-        printf("%d %f\n", global_z_i_init, global_z_d_init);
-        /*printf("%d %f\n", extern_z_i, extern_z_d);*/
-        return 0;
-}
diff --git a/driver/linkloader/tests/stubs/arm-stub.c b/driver/linkloader/tests/stubs/arm-stub.c
deleted file mode 100644
index 2138f9a..0000000
--- a/driver/linkloader/tests/stubs/arm-stub.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-
-
-// Note: The first instruction stands for ldr, which loads the data from
-// memory to the specified register.  Notice that due to the pipeline design,
-// when ldr is executed, the program will be advanced by 8.  So, to get our
-// address we should substract it by 4.
-
-uint32_t stub[] = {
-  0xe51ff004ul, // ldr pc, [pc, #-4]
-  0x00000000ul  // address
-};
-
-int test() {
-  printf("hello world!\n");
-  return 5;
-}
-
-int main() {
-  int (*f)() = (int (*)())stub;
-  stub[1] = (uint32_t)(uintptr_t)test;
-
-  printf("return = %d\n", f());
-  return EXIT_SUCCESS;
-}
diff --git a/driver/linkloader/tests/stubs/mips-stub.c b/driver/linkloader/tests/stubs/mips-stub.c
deleted file mode 100644
index ff34d03..0000000
--- a/driver/linkloader/tests/stubs/mips-stub.c
+++ /dev/null
@@ -1,48 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-
-// --------------
-// Register Usage
-// --------------
-// $0       zero
-// $1       at
-// $2-$3    function return value registers
-// $4-$7    function argument registers
-// $8-$15   temporary
-// $16-$23  saved register
-// $24-$25  temporary
-// $26-$27  os kernel
-// $28      global pointer
-// $29      stack pointer
-// $30      saved register
-// $31      return addres reigster
-
-// --------------------
-// Instruction Encoding
-// --------------------
-// lui: 0011 1100 000t tttt iiii iiii iiii iiii
-// ori: 0011 01ss ssst tttt iiii iiii iiii iiii
-// jr:  0000 00ss sss0 0000 0000 0000 0000 1000
-// nop:  0000 0000 0000 0000 0000 0000 0000 0000
-
-uint32_t stub[] = {
-  0x3c190000ul,
-  0x37390000ul,
-  0x03200008ul,
-  0x00000000ul
-};
-
-int test() {
-  printf("hello world!\n");
-  return 5;
-}
-
-int main() {
-  int (*f)() = (int (*)())stub;
-  stub[0] |= (((uint32_t)(uintptr_t)test) >> 16) & 0xffff;
-  stub[1] |= (((uint32_t)(uintptr_t)test)) & 0xffff;
-
-  printf("return = %d\n", f());
-  return EXIT_SUCCESS;
-}
diff --git a/driver/linkloader/tests/stubs/stub-layout-test.cpp b/driver/linkloader/tests/stubs/stub-layout-test.cpp
deleted file mode 100644
index ba07cd2..0000000
--- a/driver/linkloader/tests/stubs/stub-layout-test.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2011, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "StubLayout.h"
-
-void function1() {
-  printf("hello ");
-}
-
-void function2() {
-  printf("world!\n");
-}
-
-int main() {
-  StubLayout stubs;
-
-  void (*func1)() = (void (*)())stubs.allocateStub((void *)&function1);
-  void (*func2)() = (void (*)())stubs.allocateStub((void *)&function2);
-
-  if (!func1) {
-    fprintf(stderr, "ERROR: Unable to allocate stub for function1\n");
-    exit(EXIT_FAILURE);
-  }
-
-  if (!func2) {
-    fprintf(stderr, "ERROR: Unable to allocate stub for function2\n");
-    exit(EXIT_FAILURE);
-  }
-
-  function1();
-  function2();
-
-  func1();
-  func2();
-
-  return EXIT_SUCCESS;
-}
diff --git a/driver/rsdCore.cpp b/driver/rsdCore.cpp
index d69255b..3357969 100644
--- a/driver/rsdCore.cpp
+++ b/driver/rsdCore.cpp
@@ -197,6 +197,13 @@
         return false;
     }
 
+#ifndef RS_COMPATIBILITY_LIB
+    // Set a callback for compiler setup here.
+    if (false) {
+        dc->mCpuRef->setSetupCompilerCallback(NULL);
+    }
+#endif
+
     return true;
 }
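
The hunk above only wires in a disabled placeholder (if (false)). A hedged sketch of what registering a real setup callback could look like, with a stand-in type in place of bcc::RSCompilerDriver and a plain function pointer in place of the CpuRef setter:

    /* Sketch only: CompilerDriver and set_setup_compiler_callback() are
     * hypothetical stand-ins mirroring RSSetupCompilerCallback from rsd_cpu.h. */
    #include <stddef.h>
    #include <stdio.h>

    typedef struct CompilerDriver CompilerDriver;              /* stand-in            */
    typedef void (*SetupCompilerCallback)(CompilerDriver *);   /* mirrors the typedef */

    static SetupCompilerCallback g_setup_cb = NULL;

    static void set_setup_compiler_callback(SetupCompilerCallback cb) {
        g_setup_cb = cb;                       /* what setSetupCompilerCallback stores */
    }

    /* Driver-specific hook: invoked once per compiler instance, before build(). */
    static void my_setup(CompilerDriver *driver) {
        (void)driver;
        printf("configure compiler options here\n");
    }

    int main(void) {
        set_setup_compiler_callback(my_setup);
        if (g_setup_cb != NULL) {              /* same NULL check as in rsCpuScript.cpp */
            g_setup_cb(NULL);                  /* no real driver in this sketch         */
        }
        return 0;
    }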
 
diff --git a/driver/runtime/Android.mk b/driver/runtime/Android.mk
new file mode 100755
index 0000000..7a119a3
--- /dev/null
+++ b/driver/runtime/Android.mk
@@ -0,0 +1,114 @@
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+# C/LLVM-IR source files for the library
+clcore_base_files := \
+    rs_allocation.c \
+    rs_cl.c \
+    rs_core.c \
+    rs_element.c \
+    rs_mesh.c \
+    rs_matrix.c \
+    rs_program.c \
+    rs_sample.c \
+    rs_sampler.c \
+    convert.ll \
+    rsClamp.ll
+
+clcore_files := \
+    $(clcore_base_files) \
+    math.ll \
+    arch/generic.c \
+    arch/sqrt.c \
+    arch/dot_length.c
+
+clcore_neon_files := \
+    $(clcore_base_files) \
+    math.ll \
+    arch/neon.ll \
+    arch/sqrt.c \
+    arch/dot_length.c \
+    arch/clamp.c
+
+ifeq ($(ARCH_X86_HAVE_SSE2), true)
+    clcore_x86_files := \
+    $(clcore_base_files) \
+    arch/x86_generic.c \
+    arch/x86_clamp.ll \
+    arch/x86_math.ll
+
+    ifeq ($(ARCH_X86_HAVE_SSE3), true)
+        clcore_x86_files += arch/x86_dot_length.ll
+    else
+        # FIXME: even without SSE3 we could still generate better code via
+        # PSHUFD, but so far there is no device that supports only SSE2.
+        clcore_x86_files += arch/dot_length.c
+    endif
+endif
+
+ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
+  RS_VERSION := $(PLATFORM_SDK_VERSION)
+else
+  # Increment by 1 whenever this is not a final release build, since we want to
+  # be able to see the RS version number change during development.
+  # See build/core/version_defaults.mk for more information about this.
+  RS_VERSION := "(1 + $(PLATFORM_SDK_VERSION))"
+endif
+
+# Build the base version of the library
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_SRC_FILES := $(clcore_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+
+# Build a debug version of the library
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore_debug.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+rs_debug_runtime := 1
+LOCAL_SRC_FILES := $(clcore_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+
+# Build an optimized version of the library if the device is SSE2- or above
+# capable.
+ifeq ($(ARCH_X86_HAVE_SSE2),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore_x86.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_SRC_FILES := $(clcore_x86_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+endif
+
+# Build a NEON-enabled version of the library (if possible)
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
+  include $(CLEAR_VARS)
+  LOCAL_MODULE := libclcore_neon.bc
+  LOCAL_MODULE_TAGS := optional
+  LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+  LOCAL_SRC_FILES := $(clcore_neon_files)
+  LOCAL_CFLAGS += -DARCH_ARM_HAVE_NEON
+
+  include $(LOCAL_PATH)/build_bc_lib.mk
+endif
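
Assuming build_bc_lib.mk forwards RS_VERSION to the compiler as a preprocessor define (for example -DRS_VERSION=$(RS_VERSION); that makefile is not shown in this change), runtime sources can gate newer functionality on it. A small hedged illustration:

    /* Sketch only: RS_VERSION is normally supplied by the build; the fallback
     * below exists just so this snippet compiles on its own. */
    #include <stdio.h>

    #ifndef RS_VERSION
    #define RS_VERSION (1 + 17)    /* stand-in: non-REL build on top of SDK 17 */
    #endif

    int main(void) {
    #if RS_VERSION >= 18
        printf("RS runtime built against API level %d\n", (int)(RS_VERSION));
    #else
        printf("RS runtime built against an older API level\n");
    #endif
        return 0;
    }
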
diff --git a/driver/runtime/arch/clamp.c b/driver/runtime/arch/clamp.c
new file mode 100644
index 0000000..c2c2226
--- /dev/null
+++ b/driver/runtime/arch/clamp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rs_types.rsh"
+
+#define S_CLAMP(T) \
+extern T __attribute__((overloadable)) clamp(T amount, T low, T high) {             \
+    return amount < low ? low : (amount > high ? high : amount);                    \
+}
+
+//S_CLAMP(float);  implemented in .ll
+S_CLAMP(double);
+S_CLAMP(char);
+S_CLAMP(uchar);
+S_CLAMP(short);
+S_CLAMP(ushort);
+S_CLAMP(int);
+S_CLAMP(uint);
+S_CLAMP(long);
+S_CLAMP(ulong);
+
+#undef S_CLAMP
+
+
+
+#define V_CLAMP(T) \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T##2 low, T##2 high) { \
+    T##2 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T##3 low, T##3 high) { \
+    T##3 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T##4 low, T##4 high) { \
+    T##4 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    r.w = amount.w < low.w ? low.w : (amount.w > high.w ? high.w : amount.w);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T low, T high) {       \
+    T##2 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T low, T high) {       \
+    T##3 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T low, T high) {       \
+    T##4 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    r.w = amount.w < low ? low : (amount.w > high ? high : amount.w);               \
+    return r;                                                                       \
+}
+
+//V_CLAMP(float);  implemented in .ll
+V_CLAMP(double);
+V_CLAMP(char);
+V_CLAMP(uchar);
+V_CLAMP(short);
+V_CLAMP(ushort);
+#ifndef ARCH_ARM_HAVE_NEON
+    V_CLAMP(int);   // covered by arch/neon.ll when ARCH_ARM_HAVE_NEON is set
+    V_CLAMP(uint);  // covered by arch/neon.ll when ARCH_ARM_HAVE_NEON is set
+#endif
+V_CLAMP(long);
+V_CLAMP(ulong);
+
+#undef V_CLAMP
+
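
A minimal standalone illustration of what one V_CLAMP expansion provides (the scalar-bound int2 overload), using clang's ext_vector_type in place of the rs_types.rsh vector typedefs; the body below follows the macro, only the function name differs:

    #include <stdio.h>

    typedef int int2 __attribute__((ext_vector_type(2)));   /* like rs_types.rsh */

    /* Same logic as the clamp(int2, int, int) overload that V_CLAMP(int) generates. */
    static int2 clamp_int2(int2 amount, int low, int high) {
        int2 r;
        r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);
        r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);
        return r;
    }

    int main(void) {
        int2 v = {-5, 300};
        int2 c = clamp_int2(v, 0, 255);
        printf("%d %d\n", c.x, c.y);    /* prints: 0 255 */
        return 0;
    }
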
diff --git a/driver/runtime/arch/dot_length.c b/driver/runtime/arch/dot_length.c
new file mode 100644
index 0000000..94c99b6
--- /dev/null
+++ b/driver/runtime/arch/dot_length.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rs_types.rsh"
+
+extern float __attribute__((overloadable)) dot(float lhs, float rhs) {
+    return lhs * rhs;
+}
+extern float __attribute__((overloadable)) dot(float2 lhs, float2 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y;
+}
+extern float __attribute__((overloadable)) dot(float3 lhs, float3 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y + lhs.z*rhs.z;
+}
+extern float __attribute__((overloadable)) dot(float4 lhs, float4 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y + lhs.z*rhs.z + lhs.w*rhs.w;
+}
+
+extern float __attribute__((overloadable)) fabs(float);
+extern float __attribute__((overloadable)) sqrt(float);
+
+extern float __attribute__((overloadable)) length(float v) {
+    return fabs(v);
+}
+extern float __attribute__((overloadable)) length(float2 v) {
+    return sqrt(v.x*v.x + v.y*v.y);
+}
+extern float __attribute__((overloadable)) length(float3 v) {
+    return sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float __attribute__((overloadable)) length(float4 v) {
+    return sqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
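
Each length overload above is just the square root of the corresponding dot product, i.e. length(v) equals sqrt(dot(v, v)). A small standalone check, again using clang's ext_vector_type in place of rs_types.rsh:

    #include <math.h>
    #include <stdio.h>

    typedef float float3 __attribute__((ext_vector_type(3)));   /* like rs_types.rsh */

    static float dot3(float3 a, float3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
    static float length3(float3 v)        { return sqrtf(dot3(v, v)); }

    int main(void) {
        float3 v = {3.0f, 4.0f, 12.0f};
        /* dot(v, v) = 9 + 16 + 144 = 169, so length(v) = 13 */
        printf("dot = %.1f, length = %.1f\n", dot3(v, v), length3(v));
        return 0;
    }
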
diff --git a/driver/runtime/arch/generic.c b/driver/runtime/arch/generic.c
new file mode 100644
index 0000000..da83c2a
--- /dev/null
+++ b/driver/runtime/arch/generic.c
@@ -0,0 +1,948 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
+extern short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(short4);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(float4);
+extern float4 __attribute__((overloadable)) convert_float4(uchar4);
+extern float __attribute__((overloadable)) sqrt(float);
+
+/*
+ * CLAMP
+ */
+#define _CLAMP(T) \
+extern T __attribute__((overloadable)) clamp(T amount, T low, T high) {             \
+    return amount < low ? low : (amount > high ? high : amount);                    \
+}                                                                                   \
+                                                                                    \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T##2 low, T##2 high) { \
+    T##2 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T##3 low, T##3 high) { \
+    T##3 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T##4 low, T##4 high) { \
+    T##4 r;                                                                         \
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);       \
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);       \
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);       \
+    r.w = amount.w < low.w ? low.w : (amount.w > high.w ? high.w : amount.w);       \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T low, T high) {       \
+    T##2 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T low, T high) {       \
+    T##3 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    return r;                                                                       \
+}                                                                                   \
+                                                                                    \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T low, T high) {       \
+    T##4 r;                                                                         \
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);               \
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);               \
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);               \
+    r.w = amount.w < low ? low : (amount.w > high ? high : amount.w);               \
+    return r;                                                                       \
+}
+
+_CLAMP(float);
+_CLAMP(double);
+_CLAMP(char);
+_CLAMP(uchar);
+_CLAMP(short);
+_CLAMP(ushort);
+_CLAMP(int);
+_CLAMP(uint);
+_CLAMP(long);
+_CLAMP(ulong);
+
+#undef _CLAMP
+
+/*
+ * FMAX
+ */
+
+extern float __attribute__((overloadable)) fmax(float v1, float v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    r.w = v1.w > v2 ? v1.w : v2;
+    return r;
+}
+
+extern float __attribute__((overloadable)) fmin(float v1, float v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+
+/*
+ * FMIN
+ */
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    r.w = v1.w < v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * MAX
+ */
+
+extern char __attribute__((overloadable)) max(char v1, char v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) max(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) max(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) max(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern short __attribute__((overloadable)) max(short v1, short v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) max(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) max(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) max(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int __attribute__((overloadable)) max(int v1, int v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) max(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) max(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) max(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) max(int64_t v1, int64_t v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) max(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) max(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) max(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) max(uchar v1, uchar v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) max(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) max(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) max(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) max(ushort v1, ushort v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) max(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) max(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) max(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) max(uint v1, uint v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) max(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) max(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) max(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) max(ulong v1, ulong v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) max(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) max(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) max(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) max(float v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float2 v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float3 v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float4 v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+
+/*
+ * MIN
+ */
+
+extern int8_t __attribute__((overloadable)) min(int8_t v1, int8_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) min(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) min(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) min(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int16_t __attribute__((overloadable)) min(int16_t v1, int16_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) min(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) min(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) min(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int32_t __attribute__((overloadable)) min(int32_t v1, int32_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) min(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) min(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) min(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) min(int64_t v1, int64_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) min(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) min(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) min(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) min(uchar v1, uchar v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) min(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) min(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) min(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) min(ushort v1, ushort v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) min(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) min(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) min(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) min(uint v1, uint v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) min(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) min(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) min(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) min(ulong v1, ulong v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) min(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) min(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) min(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) min(float v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float2 v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float3 v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float4 v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+/*
+ * YUV
+ */
+
+extern uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v) {
+    short Y = ((short)y) - 16;
+    short U = ((short)u) - 128;
+    short V = ((short)v) - 128;
+
+    short4 p;
+    p.r = (Y * 298 + V * 409 + 128) >> 8;
+    p.g = (Y * 298 - U * 100 - V * 208 + 128) >> 8;
+    p.b = (Y * 298 + U * 516 + 128) >> 8;
+    p.a = 255;
+    p.r = rsClamp(p.r, (short)0, (short)255);
+    p.g = rsClamp(p.g, (short)0, (short)255);
+    p.b = rsClamp(p.b, (short)0, (short)255);
+
+    return convert_uchar4(p);
+}
+
+static float4 yuv_U_values = {0.f, -0.392f * 0.003921569f, +2.02 * 0.003921569f, 0.f};
+static float4 yuv_V_values = {1.603f * 0.003921569f, -0.815f * 0.003921569f, 0.f, 0.f};
+
+extern float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v) {
+    float4 color = (float)y * 0.003921569f;
+    float4 fU = ((float)u) - 128.f;
+    float4 fV = ((float)v) - 128.f;
+
+    color += fU * yuv_U_values;
+    color += fV * yuv_V_values;
+    color = clamp(color, 0.f, 1.f);
+    return color;
+}
+
+
+/*
+ * half_RECIP
+ */
+
+extern float __attribute__((overloadable)) half_recip(float v) {
+    // FIXME: substitute a real approximate-reciprocal algorithm for the generic path
+    return 1.f / v;
+}
+
+extern float2 __attribute__((overloadable)) half_recip(float2 v) {
+    float2 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_recip(float3 v) {
+    float3 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_recip(float4 v) {
+    float4 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    r.w = half_recip(v.w);
+    return r;
+}
+
+
+/*
+ * half_SQRT
+ */
+
+extern float __attribute__((overloadable)) half_sqrt(float v) {
+    return sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_sqrt(float2 v) {
+    float2 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_sqrt(float3 v) {
+    float3 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_sqrt(float4 v) {
+    float4 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    r.w = half_sqrt(v.w);
+    return r;
+}
+
+
+/*
+ * half_rsqrt
+ */
+
+extern float __attribute__((overloadable)) half_rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_rsqrt(float2 v) {
+    float2 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_rsqrt(float3 v) {
+    float3 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_rsqrt(float4 v) {
+    float4 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    r.w = half_rsqrt(v.w);
+    return r;
+}
+
+/**
+ * matrix ops
+ */
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float4 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + (m->m[8] * in.z) + (m->m[12] * in.w);
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + (m->m[9] * in.z) + (m->m[13] * in.w);
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + (m->m[10] * in.z) + (m->m[14] * in.w);
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + (m->m[11] * in.z) + (m->m[15] * in.w);
+    return ret;
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float3 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + (m->m[8] * in.z) + m->m[12];
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + (m->m[9] * in.z) + m->m[13];
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + (m->m[10] * in.z) + m->m[14];
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + (m->m[11] * in.z) + m->m[15];
+    return ret;
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float2 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + m->m[12];
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + m->m[13];
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + m->m[14];
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + m->m[15];
+    return ret;
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix3x3 *m, float3 in) {
+    float3 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[3] * in.y) + (m->m[6] * in.z);
+    ret.y = (m->m[1] * in.x) + (m->m[4] * in.y) + (m->m[7] * in.z);
+    ret.z = (m->m[2] * in.x) + (m->m[5] * in.y) + (m->m[8] * in.z);
+    return ret;
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix3x3 *m, float2 in) {
+    float3 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[3] * in.y);
+    ret.y = (m->m[1] * in.x) + (m->m[4] * in.y);
+    ret.z = (m->m[2] * in.x) + (m->m[5] * in.y);
+    return ret;
+}
+
+/**
+ * Pixel Ops
+ */
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+{
+    uchar4 c;
+    c.x = (uchar)clamp((r * 255.f + 0.5f), 0.f, 255.f);
+    c.y = (uchar)clamp((g * 255.f + 0.5f), 0.f, 255.f);
+    c.z = (uchar)clamp((b * 255.f + 0.5f), 0.f, 255.f);
+    c.w = 255;
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+{
+    uchar4 c;
+    c.x = (uchar)clamp((r * 255.f + 0.5f), 0.f, 255.f);
+    c.y = (uchar)clamp((g * 255.f + 0.5f), 0.f, 255.f);
+    c.z = (uchar)clamp((b * 255.f + 0.5f), 0.f, 255.f);
+    c.w = (uchar)clamp((a * 255.f + 0.5f), 0.f, 255.f);
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    color = clamp(color, 0.f, 255.f);
+    uchar4 c = {color.x, color.y, color.z, 255};
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    color = clamp(color, 0.f, 255.f);
+    uchar4 c = {color.x, color.y, color.z, color.w};
+    return c;
+}
+
diff --git a/driver/runtime/arch/neon.ll b/driver/runtime/arch/neon.ll
new file mode 100644
index 0000000..4a1172b
--- /dev/null
+++ b/driver/runtime/arch/neon.ll
@@ -0,0 +1,1172 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;               INTRINSICS               ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
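+; Declarations of the ARM NEON intrinsics used below: vector integer and
+; float min/max, saturating narrowing shifts, and the approximate
+; reciprocal (vrecpe) and reciprocal-square-root (vrsqrte) estimates.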
+declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                HELPERS                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
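+; The smear_* helpers broadcast one scalar into every lane of a vector,
+; so the scalar-argument overloads can reuse the vector NEON paths.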
+define internal <4 x float> @smear_4f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+define internal <4 x i32> @smear_4i(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <4 x i32> %1, i32 %in, i32 1
+  %3 = insertelement <4 x i32> %2, i32 %in, i32 2
+  %4 = insertelement <4 x i32> %3, i32 %in, i32 3
+  ret <4 x i32> %4
+}
+
+define internal <4 x i16> @smear_4s(i16 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i16> undef, i16 %in, i32 0
+  %2 = insertelement <4 x i16> %1, i16 %in, i32 1
+  %3 = insertelement <4 x i16> %2, i16 %in, i32 2
+  %4 = insertelement <4 x i16> %3, i16 %in, i32 3
+  ret <4 x i16> %4
+}
+
+
+
+define internal <2 x float> @smear_2f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x float> undef, float %in, i32 0
+  %2 = insertelement <2 x float> %1, float %in, i32 1
+  ret <2 x float> %2
+}
+
+define internal <2 x i32> @smear_2i(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <2 x i32> %1, i32 %in, i32 1
+  ret <2 x i32> %2
+}
+
+define internal <2 x i16> @smear_2s(i16 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x i16> undef, i16 %in, i32 0
+  %2 = insertelement <2 x i16> %1, i16 %in, i32 1
+  ret <2 x i16> %2
+}
+
+
+define internal <4 x i32> @smear_4i32(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <4 x i32> %1, i32 %in, i32 1
+  %3 = insertelement <4 x i32> %2, i32 %in, i32 2
+  %4 = insertelement <4 x i32> %3, i32 %in, i32 3
+  ret <4 x i32> %4
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                 CLAMP                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
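+; Function names are the Itanium-mangled RenderScript prototypes, e.g.
+; _Z5clampDv4_fS_S_ is clamp(float4, float4, float4).  clamp() is built as
+; min(value, high) followed by max(..., low); 3-element vectors are widened
+; to 4 lanes with shufflevector so the quad-register NEON ops can be used.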
+define <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %low, <4 x float> %high) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %value, <4 x float> %high) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %low) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <4 x float> @_Z5clampDv4_fff(<4 x float> %value, float %low, float %high) nounwind readonly {
+  %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
+  %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
+  %out = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %_low, <4 x float> %_high) nounwind readonly
+  ret <4 x float> %out
+}
+
+define <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %value, <3 x float> %low, <3 x float> %high) nounwind readonly {
+  %_value = shufflevector <3 x float> %value, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x float> %low, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x float> %high, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
+  %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
+  %c = shufflevector <4 x float> %b, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <3 x float> @_Z5clampDv3_fff(<3 x float> %value, float %low, float %high) nounwind readonly {
+  %_value = shufflevector <3 x float> %value, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
+  %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
+  %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
+  %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
+  %c = shufflevector <4 x float> %b, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %value, <2 x float> %low, <2 x float> %high) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %value, <2 x float> %high) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %1, <2 x float> %low) nounwind readnone
+  ret <2 x float> %2
+}
+
+define <2 x float> @_Z5clampDv2_fff(<2 x float> %value, float %low, float %high) nounwind readonly {
+  %_high = tail call <2 x float> @smear_2f(float %high) nounwind readnone
+  %_low = tail call <2 x float> @smear_2f(float %low) nounwind readnone
+  %a = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %value, <2 x float> %_high) nounwind readnone
+  %b = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %a, <2 x float> %_low) nounwind readnone
+  ret <2 x float> %b
+}
+
+define float @_Z5clampfff(float %value, float %low, float %high) nounwind readonly {
+  %1 = fcmp olt float %value, %high
+  %2 = select i1 %1, float %value, float %high
+  %3 = fcmp ogt float %2, %low
+  %4 = select i1 %3, float %2, float %low
+  ret float %4
+}
+
+
+
+define <4 x i32> @_Z5clampDv4_iS_S_(<4 x i32> %value, <4 x i32> %low, <4 x i32> %high) nounwind readonly {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %value, <4 x i32> %high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @_Z5clampDv4_iii(<4 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %value, <4 x i32> %_high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %_low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <3 x i32> @_Z5clampDv3_iS_S_(<3 x i32> %value, <3 x i32> %low, <3 x i32> %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x i32> %low, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x i32> %high, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <3 x i32> @_Z5clampDv3_iii(<3 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %a = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <2 x i32> @_Z5clampDv2_iS_S_(<2 x i32> %value, <2 x i32> %low, <2 x i32> %high) nounwind readonly {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %value, <2 x i32> %high) nounwind readnone
+  %2 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %low) nounwind readnone
+  ret <2 x i32> %2
+}
+
+define <2 x i32> @_Z5clampDv2_iii(<2 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <2 x i32> @smear_2i(i32 %high) nounwind readnone
+  %_low = tail call <2 x i32> @smear_2i(i32 %low) nounwind readnone
+  %a = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %value, <2 x i32> %_high) nounwind readnone
+  %b = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %a, <2 x i32> %_low) nounwind readnone
+  ret <2 x i32> %b
+}
+
+
+
+define <4 x i32> @_Z5clampDv4_jS_S_(<4 x i32> %value, <4 x i32> %low, <4 x i32> %high) nounwind readonly {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %value, <4 x i32> %high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @_Z5clampDv4_jjj(<4 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %value, <4 x i32> %_high) nounwind readnone
+  %2 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %_low) nounwind readnone
+  ret <4 x i32> %2
+}
+
+define <3 x i32> @_Z5clampDv3_jS_S_(<3 x i32> %value, <3 x i32> %low, <3 x i32> %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x i32> %low, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x i32> %high, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <3 x i32> @_Z5clampDv3_jjj(<3 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_value = shufflevector <3 x i32> %value, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x i32> @smear_4i(i32 %high) nounwind readnone
+  %_low = tail call <4 x i32> @smear_4i(i32 %low) nounwind readnone
+  %a = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %_value, <4 x i32> %_high) nounwind readnone
+  %b = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %a, <4 x i32> %_low) nounwind readnone
+  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %c
+}
+
+define <2 x i32> @_Z5clampDv2_jS_S_(<2 x i32> %value, <2 x i32> %low, <2 x i32> %high) nounwind readonly {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %value, <2 x i32> %high) nounwind readnone
+  %2 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %low) nounwind readnone
+  ret <2 x i32> %2
+}
+
+define <2 x i32> @_Z5clampDv2_jjj(<2 x i32> %value, i32 %low, i32 %high) nounwind readonly {
+  %_high = tail call <2 x i32> @smear_2i(i32 %high) nounwind readnone
+  %_low = tail call <2 x i32> @smear_2i(i32 %low) nounwind readnone
+  %a = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %value, <2 x i32> %_high) nounwind readnone
+  %b = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %a, <2 x i32> %_low) nounwind readnone
+  ret <2 x i32> %b
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FMAX                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
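+; fmax maps straight onto the NEON vmax instructions; unlike libm fmax(),
+; these propagate NaN operands rather than returning the non-NaN value.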
+define <4 x float> @_Z4fmaxDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %v1, <4 x float> %v2) nounwind readnone
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z4fmaxDv4_ff(<4 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %v1, <4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z4fmaxDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %v2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <3 x float> @_Z4fmaxDv3_ff(<3 x float> %v1, float %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %c = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z4fmaxDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %v1, <2 x float> %v2) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z4fmaxDv2_ff(<2 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <2 x float> @smear_2f(float %v2) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %v1, <2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define float @_Z4fmaxff(float %v1, float %v2) nounwind readonly {
+  %1 = fcmp ogt float %v1, %v2
+  %2 = select i1 %1, float %v1, float %v2
+  ret float %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FMIN                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x float> @_Z4fminDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %v1, <4 x float> %v2) nounwind readnone
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z4fminDv4_ff(<4 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %v1, <4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z4fminDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %v2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <3 x float> @_Z4fminDv3_ff(<3 x float> %v1, float %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %c = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z4fminDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %v1, <2 x float> %v2) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z4fminDv2_ff(<2 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <2 x float> @smear_2f(float %v2) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %v1, <2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define float @_Z4fminff(float %v1, float %v2) nounwind readnone {
+  %1 = fcmp olt float %v1, %v2
+  %2 = select i1 %1, float %v1, float %v2
+  ret float %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  MAX                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
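+; Integer max: char/short vectors are sign- or zero-extended to 32-bit
+; lanes, run through vmaxs/vmaxu, then truncated back to their own width.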
+define signext i8 @_Z3maxcc(i8 signext %v1, i8 signext %v2) nounwind readnone {
+  %1 = icmp sgt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3maxDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = sext <2 x i8> %v1 to <2 x i32>
+  %2 = sext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3maxDv3_cS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = sext <3 x i8> %v1 to <3 x i32>
+  %2 = sext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3maxDv4_cS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = sext <4 x i8> %v1 to <4 x i32>
+  %2 = sext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define signext i16 @_Z3maxss(i16 signext %v1, i16 signext %v2) nounwind readnone {
+  %1 = icmp sgt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3maxDv2_sS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = sext <2 x i16> %v1 to <2 x i32>
+  %2 = sext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3maxDv3_sS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = sext <3 x i16> %v1 to <3 x i32>
+  %2 = sext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3maxDv4_sS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = sext <4 x i16> %v1 to <4 x i32>
+  %2 = sext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3maxii(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp sgt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3maxDv2_iS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3maxDv3_iS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3maxDv4_iS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3maxxx(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp sgt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define zeroext i8 @_Z3maxhh(i8 zeroext %v1, i8 zeroext %v2) nounwind readnone {
+  %1 = icmp ugt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3maxDv2_hS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = zext <2 x i8> %v1 to <2 x i32>
+  %2 = zext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3maxDv3_hS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = zext <3 x i8> %v1 to <3 x i32>
+  %2 = zext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3maxDv4_hS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = zext <4 x i8> %v1 to <4 x i32>
+  %2 = zext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define zeroext i16 @_Z3maxtt(i16 zeroext %v1, i16 zeroext %v2) nounwind readnone {
+  %1 = icmp ugt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3maxDv2_tS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = zext <2 x i16> %v1 to <2 x i32>
+  %2 = zext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3maxDv3_tS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = zext <3 x i16> %v1 to <3 x i32>
+  %2 = zext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3maxDv4_tS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = zext <4 x i16> %v1 to <4 x i32>
+  %2 = zext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3maxjj(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp ugt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3maxDv2_jS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3maxDv3_jS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3maxDv4_jS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3maxyy(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp ugt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define float @_Z3maxff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @_Z4fmaxff(float %v1, float %v2)
+  ret float %1
+}
+
+define <2 x float> @_Z3maxDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fmaxDv2_fS_(<2 x float> %v1, <2 x float> %v2)
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z3maxDv2_ff(<2 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fmaxDv2_ff(<2 x float> %v1, float %v2)
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z3maxDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fmaxDv3_fS_(<3 x float> %v1, <3 x float> %v2)
+  ret <3 x float> %1
+}
+
+define <3 x float> @_Z3maxDv3_ff(<3 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fmaxDv3_ff(<3 x float> %v1, float %v2)
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z3maxDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fmaxDv4_fS_(<4 x float> %v1, <4 x float> %v2)
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z3maxDv4_ff(<4 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fmaxDv4_ff(<4 x float> %v1, float %v2)
+  ret <4 x float> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  MIN                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
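+; Same pattern as MAX above, using vmins/vminu.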
+define signext i8 @_Z3mincc(i8 signext %v1, i8 signext %v2) nounwind readnone {
+  %1 = icmp slt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3minDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = sext <2 x i8> %v1 to <2 x i32>
+  %2 = sext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3minDv3_cS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = sext <3 x i8> %v1 to <3 x i32>
+  %2 = sext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3minDv4_cS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = sext <4 x i8> %v1 to <4 x i32>
+  %2 = sext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define signext i16 @_Z3minss(i16 signext %v1, i16 signext %v2) nounwind readnone {
+  %1 = icmp slt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3minDv2_sS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = sext <2 x i16> %v1 to <2 x i32>
+  %2 = sext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3minDv3_sS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = sext <3 x i16> %v1 to <3 x i32>
+  %2 = sext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3minDv4_sS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = sext <4 x i16> %v1 to <4 x i32>
+  %2 = sext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3minii(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp slt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3minDv2_iS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3minDv3_iS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3minDv4_iS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3minxx(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp slt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define zeroext i8 @_Z3minhh(i8 zeroext %v1, i8 zeroext %v2) nounwind readnone {
+  %1 = icmp ult i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3minDv2_hS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = zext <2 x i8> %v1 to <2 x i32>
+  %2 = zext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3minDv3_hS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = zext <3 x i8> %v1 to <3 x i32>
+  %2 = zext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3minDv4_hS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = zext <4 x i8> %v1 to <4 x i32>
+  %2 = zext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define zeroext i16 @_Z3mintt(i16 zeroext %v1, i16 zeroext %v2) nounwind readnone {
+  %1 = icmp ult i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3minDv2_tS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = zext <2 x i16> %v1 to <2 x i32>
+  %2 = zext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3minDv3_tS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = zext <3 x i16> %v1 to <3 x i32>
+  %2 = zext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3minDv4_tS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = zext <4 x i16> %v1 to <4 x i32>
+  %2 = zext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3minjj(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp ult i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3minDv2_jS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3minDv3_jS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3minDv4_jS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3minyy(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp ult i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define float @_Z3minff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @_Z4fminff(float %v1, float %v2)
+  ret float %1
+}
+
+define <2 x float> @_Z3minDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fminDv2_fS_(<2 x float> %v1, <2 x float> %v2)
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z3minDv2_ff(<2 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fminDv2_ff(<2 x float> %v1, float %v2)
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z3minDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fminDv3_fS_(<3 x float> %v1, <3 x float> %v2)
+  ret <3 x float> %1
+}
+
+define <3 x float> @_Z3minDv3_ff(<3 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fminDv3_ff(<3 x float> %v1, float %v2)
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z3minDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fminDv4_fS_(<4 x float> %v1, <4 x float> %v2)
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z3minDv4_ff(<4 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fminDv4_ff(<4 x float> %v1, float %v2)
+  ret <4 x float> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  YUV                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+@yuv_U = internal constant <4 x i32> <i32 0, i32 -100, i32 516, i32 0>, align 16
+@yuv_V = internal constant <4 x i32> <i32 409, i32 -208, i32 0, i32 0>, align 16
+@yuv_0 = internal constant <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+@yuv_255 = internal constant <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>, align 16
+
+
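+; Vector form of rsYuvToRGBA_uchar4: the per-channel U/V weights are in
+; @yuv_U/@yuv_V, the 8.8 fixed-point result is clamped to [0, 65535] with
+; vmaxs/vmins, then shifted right by 8 and truncated to a uchar4.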
+define <4 x i8> @_Z18rsYuvToRGBA_uchar4hhh(i8 %pY, i8 %pU, i8 %pV) nounwind readnone alwaysinline {
+  %_sy = zext i8 %pY to i32
+  %_su = zext i8 %pU to i32
+  %_sv = zext i8 %pV to i32
+
+  %_sy2 = add i32 -16, %_sy
+  %_sy3 = mul i32 298, %_sy2
+  %_su2 = add i32 -128, %_su
+  %_sv2 = add i32 -128, %_sv
+  %_y = tail call <4 x i32> @smear_4i32(i32 %_sy3) nounwind readnone
+  %_u = tail call <4 x i32> @smear_4i32(i32 %_su2) nounwind readnone
+  %_v = tail call <4 x i32> @smear_4i32(i32 %_sv2) nounwind readnone
+
+  %mu = load <4 x i32>* @yuv_U, align 8
+  %mv = load <4 x i32>* @yuv_V, align 8
+  %_u2 = mul <4 x i32> %_u, %mu
+  %_v2 = mul <4 x i32> %_v, %mv
+  %_y2 = add <4 x i32> %_y, %_u2
+  %_y3 = add <4 x i32> %_y2, %_v2
+
+;  %r1 = tail call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %_y3, <4 x i32> <i32 8, i32 8, i32 8, i32 8>) nounwind readnone
+;  %r2 = trunc <4 x i16> %r1 to <4 x i8>
+;  ret <4 x i8> %r2
+
+  %c0 = load <4 x i32>* @yuv_0, align 8
+  %c255 = load <4 x i32>* @yuv_255, align 8
+  %r1 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %_y3, <4 x i32> %c0) nounwind readnone
+  %r2 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %r1, <4 x i32> %c255) nounwind readnone
+  %r3 = lshr <4 x i32> %r2, <i32 8, i32 8, i32 8, i32 8>
+  %r4 = trunc <4 x i32> %r3 to <4 x i8>
+  ret <4 x i8> %r4
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              half_RECIP              ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
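+; half_recip uses the NEON reciprocal estimate (vrecpe), which gives only a
+; low-precision approximation; trading accuracy for speed is the intended
+; contract of the half_* functions.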
+define float @_Z10half_recipf(float %v) {
+  %1 = insertelement <2 x float> undef, float %v, i32 0
+  %2 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %1) nounwind readnone
+  %3 = extractelement <2 x float> %2, i32 0
+  ret float %3
+}
+
+define <2 x float> @_Z10half_recip2Dv2_h(<2 x float> %v) nounwind readnone {
+  %1 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %v) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z10half_recip3Dv3_h(<3 x float> %v) nounwind readnone {
+  %1 = shufflevector <3 x float> %v, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %1) nounwind readnone
+  %3 = shufflevector <4 x float> %2, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %3
+}
+
+define <4 x float> @_Z10half_recip4Dv4_h(<4 x float> %v) nounwind readnone {
+  %1 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %v) nounwind readnone
+  ret <4 x float> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              half_SQRT               ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
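+; half_sqrt is built as vrecpe(vrsqrte(v)): the reciprocal estimate of the
+; reciprocal-square-root estimate approximates sqrt(v).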
+define float @_Z9half_sqrtf(float %v) {
+  %1 = insertelement <2 x float> undef, float %v, i32 0
+  %2 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %1) nounwind readnone
+  %3 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %2) nounwind readnone
+  %4 = extractelement <2 x float> %3, i32 0
+  ret float %4
+}
+
+define <2 x float> @_Z9half_sqrt2Dv2_h(<2 x float> %v) nounwind readnone {
+  %1 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %v) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define <3 x float> @_Z9half_sqrt3Dv3_h(<3 x float> %v) nounwind readnone {
+  %1 = shufflevector <3 x float> %v, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %1) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <4 x float> @_Z9half_sqrt4Dv4_h(<4 x float> %v) nounwind readnone {
+  %1 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %v) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              half_RSQRT              ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define float @_Z10half_rsqrtf(float %v) {
+  %1 = insertelement <2 x float> undef, float %v, i32 0
+  %2 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %1) nounwind readnone
+  %3 = extractelement <2 x float> %2, i32 0
+  ret float %3
+}
+
+define <2 x float> @_Z10half_rsqrt2Dv2_h(<2 x float> %v) nounwind readnone {
+  %1 = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %v) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z10half_rsqrt3Dv3_h(<3 x float> %v) nounwind readnone {
+  %1 = shufflevector <3 x float> %v, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %1) nounwind readnone
+  %3 = shufflevector <4 x float> %2, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %3
+}
+
+define <4 x float> @_Z10half_rsqrt4Dv4_h(<4 x float> %v) nounwind readnone {
+  %1 = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %v) nounwind readnone
+  ret <4 x float> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              matrix                    ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
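+; Matrix multiply: each input component is smeared across a 4-lane vector,
+; multiplied against the corresponding column of the column-major matrix,
+; and the partial products are accumulated with fadd.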
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
+
+%struct.rs_matrix4x4 = type { [16 x float] }
+%struct.rs_matrix3x3 = type { [9 x float] }
+%struct.rs_matrix2x2 = type { [4 x float] }
+
+define internal <4 x float> @smear_f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to i8*
+  %xm = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %px2, i32 4) nounwind
+
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to i8*
+  %ym = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %py2, i32 4) nounwind
+
+  %pz = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 5
+  %pz2 = bitcast float* %pz to i8*
+  %zm2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %pz2, i32 4) nounwind
+  %zm = shufflevector <4 x float> %zm2, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a4, %a3
+  %a6 = shufflevector <4 x float> %a5, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a6
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = shufflevector <4 x float> %a3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a4
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %x0 = extractelement <4 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <4 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <4 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+  %w0 = extractelement <4 x float> %in, i32 3
+  %w = tail call <4 x float> @smear_f(float %w0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a3, %a4
+  %a6 = fmul <4 x float> %w, %wm
+  %a7 = fadd <4 x float> %a5, %a6
+  ret <4 x float> %a7
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  %a5 = fmul <4 x float> %z, %zm
+  %a6 = fadd <4 x float> %a4, %a5
+  ret <4 x float> %a6
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  ret <4 x float> %a4
+}
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;              pixel ops                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
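+; rsPackColorTo8888: scale to 0..255, add 0.5 for rounding, clamp, and
+; convert to uchar4; the float3 and scalar forms forward to the float4 form
+; with alpha forced to 1.0 (opaque).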
+@fc_255.0 = internal constant <4 x float> <float 255.0, float 255.0, float 255.0, float 255.0>, align 16
+@fc_0.5 = internal constant <4 x float> <float 0.5, float 0.5, float 0.5, float 0.5>, align 16
+@fc_0 = internal constant <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, align 16
+
+declare <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %in) nounwind readnone
+declare <4 x float> @_Z14convert_float4Dv4_h(<4 x i8> %in) nounwind readnone
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+define <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %color) nounwind readnone {
+    %f255 = load <4 x float>* @fc_255.0, align 16
+    %f05 = load <4 x float>* @fc_0.5, align 16
+    %f0 = load <4 x float>* @fc_0, align 16
+    %v1 = fmul <4 x float> %f255, %color
+    %v2 = fadd <4 x float> %f05, %v1
+    %v3 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %v2, <4 x float> %f0, <4 x float> %f255) nounwind readnone
+    %v4 = tail call <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %v3) nounwind readnone
+    ret <4 x i8> %v4
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+define <4 x i8> @_Z17rsPackColorTo8888Dv3_f(<3 x float> %color) nounwind readnone {
+    %1 = shufflevector <3 x float> %color, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %2 = insertelement <4 x float> %1, float 1.0, i32 3
+    %3 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %2) nounwind readnone
+    ret <4 x i8> %3
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+define <4 x i8> @_Z17rsPackColorTo8888fff(float %r, float %g, float %b) nounwind readnone {
+    %1 = insertelement <4 x float> undef, float %r, i32 0
+    %2 = insertelement <4 x float> %1, float %g, i32 1
+    %3 = insertelement <4 x float> %2, float %b, i32 2
+    %4 = insertelement <4 x float> %3, float 1.0, i32 3
+    %5 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %4) nounwind readnone
+    ret <4 x i8> %5
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+define <4 x i8> @_Z17rsPackColorTo8888ffff(float %r, float %g, float %b, float %a) nounwind readnone {
+    %1 = insertelement <4 x float> undef, float %r, i32 0
+    %2 = insertelement <4 x float> %1, float %g, i32 1
+    %3 = insertelement <4 x float> %2, float %b, i32 2
+    %4 = insertelement <4 x float> %3, float %a, i32 3
+    %5 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %4) nounwind readnone
+    ret <4 x i8> %5
+}
+
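Note: a rough C equivalent of the float4 overload of rsPackColorTo8888 above (a sketch only; the extern declarations stand in for the runtime's overloads). It scales to 0..255, adds 0.5 so the truncating conversion rounds to nearest, clamps, then narrows to uchar4:

#include "rs_types.rsh"

extern float4 __attribute__((overloadable)) clamp(float4 v, float4 lo, float4 hi);
extern uchar4 __attribute__((overloadable)) convert_uchar4(float4 v);

/* Illustrative only: mirrors _Z17rsPackColorTo8888Dv4_f above. */
static uchar4 pack_color_8888_sketch(float4 color) {
    float4 lo = {0.f, 0.f, 0.f, 0.f};
    float4 hi = {255.f, 255.f, 255.f, 255.f};
    float4 v = color * 255.f + 0.5f;
    return convert_uchar4(clamp(v, lo, hi));
}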
diff --git a/driver/runtime/arch/sqrt.c b/driver/runtime/arch/sqrt.c
new file mode 100755
index 0000000..f1dac5f
--- /dev/null
+++ b/driver/runtime/arch/sqrt.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
+#define FN_FUNC_FN(fnc)                                         \
+extern float2 __attribute__((overloadable)) fnc(float2 v) { \
+    float2 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern float3 __attribute__((overloadable)) fnc(float3 v) { \
+    float3 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern float4 __attribute__((overloadable)) fnc(float4 v) { \
+    float4 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+extern float __attribute__((overloadable)) sqrt(float);
+
+FN_FUNC_FN(sqrt)
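Note: written out, FN_FUNC_FN(sqrt) above expands to the three vector overloads; the float2 case, for instance, is simply the per-component fan-out over the scalar sqrt declared just before it:

extern float2 __attribute__((overloadable)) sqrt(float2 v) {
    float2 r;
    r.x = sqrt(v.x);   /* each component falls through to the scalar overload */
    r.y = sqrt(v.y);
    return r;
}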
diff --git a/driver/runtime/arch/x86_clamp.ll b/driver/runtime/arch/x86_clamp.ll
new file mode 100755
index 0000000..422e9f6
--- /dev/null
+++ b/driver/runtime/arch/x86_clamp.ll
@@ -0,0 +1,74 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+
+define <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %in, <4 x float> %low, <4 x float> %high) nounwind readnone alwaysinline {
+  %1 = tail call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %in, <4 x float> %high) nounwind readnone
+  %2 = tail call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %low) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %in, <3 x float> %low, <3 x float> %high) nounwind readnone alwaysinline {
+  %1 = shufflevector <3 x float> %in, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %low, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = shufflevector <3 x float> %high, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %1, <4 x float> %2, <4 x float> %3) nounwind readnone
+  %5 = shufflevector <4 x float> %4, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %5
+}
+
+define <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %in, <2 x float> %low, <2 x float> %high) nounwind readnone alwaysinline {
+  %1 = shufflevector <2 x float> %in, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <2 x float> %low, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = shufflevector <2 x float> %high, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %1, <4 x float> %2, <4 x float> %3) nounwind readnone
+  %5 = shufflevector <4 x float> %4, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %5
+}
+
+define float @_Z5clampfff(float %in, float %low, float %high) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> undef, float %low, i32 0
+  %3 = insertelement <4 x float> undef, float %high, i32 0
+  %4 = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %3) nounwind readnone
+  %5 = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %4, <4 x float> %2) nounwind readnone
+  %6 = extractelement <4 x float> %5, i32 0
+  ret float %6
+}
+
+define <4 x float> @_Z5clampDv4_fff(<4 x float> %in, float %low, float %high) nounwind readonly {
+  %1 = insertelement <4 x float> undef, float %low, i32 0
+  %2 = insertelement <4 x float> %1, float %low, i32 1
+  %3 = insertelement <4 x float> %2, float %low, i32 2
+  %4 = insertelement <4 x float> %3, float %low, i32 3
+  %5 = insertelement <4 x float> undef, float %high, i32 0
+  %6 = insertelement <4 x float> %5, float %high, i32 1
+  %7 = insertelement <4 x float> %6, float %high, i32 2
+  %8 = insertelement <4 x float> %7, float %high, i32 3
+  %9 = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %in, <4 x float> %4, <4 x float> %8) nounwind readnone
+  ret <4 x float> %9
+}
+
+define <3 x float> @_Z5clampDv3_fff(<3 x float> %in, float %low, float %high) nounwind readonly {
+  %1 = insertelement <3 x float> undef, float %low, i32 0
+  %2 = insertelement <3 x float> %1, float %low, i32 1
+  %3 = insertelement <3 x float> %2, float %low, i32 2
+  %4 = insertelement <3 x float> undef, float %high, i32 0
+  %5 = insertelement <3 x float> %4, float %high, i32 1
+  %6 = insertelement <3 x float> %5, float %high, i32 2
+  %7 = tail call <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %in, <3 x float> %3, <3 x float> %6) nounwind readnone
+  ret <3 x float> %7
+}
+
+define <2 x float> @_Z5clampDv2_fff(<2 x float> %in, float %low, float %high) nounwind readonly {
+  %1 = insertelement <2 x float> undef, float %low, i32 0
+  %2 = insertelement <2 x float> %1, float %low, i32 1
+  %3 = insertelement <2 x float> undef, float %high, i32 0
+  %4 = insertelement <2 x float> %3, float %high, i32 1
+  %5 = tail call <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %in, <2 x float> %2, <2 x float> %4) nounwind readnone
+  ret <2 x float> %5
+}
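Note: at the intrinsics level the vector clamp above is just min-then-max; a standalone C sketch of the same idea (plain SSE intrinsics, not part of the runtime):

#include <xmmintrin.h>

/* clamp(in, low, high) = max(min(in, high), low), mirroring the
 * llvm.x86.sse.min.ps / llvm.x86.sse.max.ps pairing in the IR above. */
static __m128 clamp_ps_sketch(__m128 in, __m128 low, __m128 high) {
    return _mm_max_ps(_mm_min_ps(in, high), low);
}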
diff --git a/driver/runtime/arch/x86_dot_length.ll b/driver/runtime/arch/x86_dot_length.ll
new file mode 100644
index 0000000..21f2f3e
--- /dev/null
+++ b/driver/runtime/arch/x86_dot_length.ll
@@ -0,0 +1,75 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
+declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
+declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+define float @_Z3dotDv4_fS_(<4 x float> %lhs, <4 x float> %rhs) nounwind readnone {
+  %1 = fmul <4 x float> %lhs, %rhs
+  %2 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %1) nounwind readnone
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  ret float %4
+}
+
+define float @_Z3dotDv3_fS_(<3 x float> %lhs, <3 x float> %rhs) nounwind readnone {
+  %1 = fmul <3 x float> %lhs, %rhs
+  %2 = shufflevector <3 x float> %1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = bitcast <4 x float> %2 to <2 x i64>
+  %4 = tail call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %3, i32 32)
+  %5 = bitcast <2 x i64> %4 to <4 x float>
+  %6 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %5, <4 x float> %5) nounwind readnone
+  %7 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %6, <4 x float> %6) nounwind readnone
+  %8 = extractelement <4 x float> %7, i32 0
+  ret float %8
+}
+
+define float @_Z3dotDv2_fS_(<2 x float> %lhs, <2 x float> %rhs) nounwind readnone {
+  %1 = fmul <2 x float> %lhs, %rhs
+  %2 = shufflevector <2 x float> %1, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  ret float %4
+}
+
+define float @_Z3dotff(float %lhs, float %rhs) nounwind readnone {
+  %1 = fmul float %lhs, %rhs
+  ret float %1
+}
+
+define float @_Z6lengthDv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fmul <4 x float> %in, %in
+  %2 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %1) nounwind readnone
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  %5 = tail call float @llvm.sqrt.f32(float %4) nounwind readnone
+  ret float %5
+}
+
+define float @_Z6lengthDv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fmul <3 x float> %in, %in
+  %2 = shufflevector <3 x float> %1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = bitcast <4 x float> %2 to <2 x i64>
+  %4 = tail call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %3, i32 32)
+  %5 = bitcast <2 x i64> %4 to <4 x float>
+  %6 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %5, <4 x float> %5) nounwind readnone
+  %7 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %6, <4 x float> %6) nounwind readnone
+  %8 = extractelement <4 x float> %7, i32 0
+  %9 = tail call float @llvm.sqrt.f32(float %8) nounwind readnone
+  ret float %9
+}
+
+define float @_Z6lengthDv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fmul <2 x float> %in, %in
+  %2 = shufflevector <2 x float> %1, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %2) nounwind readnone
+  %4 = extractelement <4 x float> %3, i32 0
+  %5 = tail call float @llvm.sqrt.f32(float %4) nounwind readnone
+  ret float %5
+}
+
+define float @_Z6lengthf(float %in) nounwind readnone alwaysinline {
+  ret float %in
+}
+
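Note: the four-element dot product above leans on SSE3 horizontal adds to collapse the partial products; an equivalent standalone C sketch (illustrative only, not part of the runtime):

#include <pmmintrin.h>   /* SSE3: _mm_hadd_ps */

/* dot(lhs, rhs): lane-wise multiply, then two horizontal adds fold the four
 * products into lane 0, as in _Z3dotDv4_fS_ above. */
static float dot4_sketch(__m128 lhs, __m128 rhs) {
    __m128 m = _mm_mul_ps(lhs, rhs);
    m = _mm_hadd_ps(m, m);   /* [p0+p1, p2+p3, p0+p1, p2+p3] */
    m = _mm_hadd_ps(m, m);   /* the full sum in every lane   */
    return _mm_cvtss_f32(m);
}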
diff --git a/driver/runtime/arch/x86_generic.c b/driver/runtime/arch/x86_generic.c
new file mode 100644
index 0000000..c46c54a
--- /dev/null
+++ b/driver/runtime/arch/x86_generic.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
+extern short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high);
+extern float4 __attribute__((overloadable)) clamp(float4 amount, float4 low, float4 high);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(short4);
+extern float __attribute__((overloadable)) sqrt(float);
+
+/*
+ * FMAX
+ */
+
+extern float __attribute__((overloadable)) fmax(float v1, float v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    r.w = v1.w > v2 ? v1.w : v2;
+    return r;
+}
+
+/*
+ * FMIN
+ */
+
+extern float __attribute__((overloadable)) fmin(float v1, float v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    r.w = v1.w < v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * MAX
+ */
+
+extern char __attribute__((overloadable)) max(char v1, char v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) max(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) max(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) max(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern short __attribute__((overloadable)) max(short v1, short v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) max(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) max(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) max(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int __attribute__((overloadable)) max(int v1, int v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) max(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) max(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) max(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) max(int64_t v1, int64_t v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) max(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) max(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) max(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) max(uchar v1, uchar v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) max(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) max(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) max(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) max(ushort v1, ushort v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) max(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) max(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) max(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) max(uint v1, uint v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) max(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) max(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) max(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) max(ulong v1, ulong v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) max(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) max(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) max(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) max(float v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float2 v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float3 v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float4 v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+
+/*
+ * MIN
+ */
+
+extern int8_t __attribute__((overloadable)) min(int8_t v1, int8_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) min(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) min(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) min(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int16_t __attribute__((overloadable)) min(int16_t v1, int16_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) min(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) min(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) min(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int32_t __attribute__((overloadable)) min(int32_t v1, int32_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) min(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) min(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) min(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) min(int64_t v1, int64_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) min(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) min(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) min(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) min(uchar v1, uchar v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) min(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) min(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) min(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) min(ushort v1, ushort v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) min(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) min(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) min(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) min(uint v1, uint v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) min(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) min(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) min(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) min(ulong v1, ulong v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) min(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) min(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) min(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) min(float v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float2 v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float3 v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float4 v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+
+/*
+ * YUV
+ */
+
+extern uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v) {
+    short Y = ((short)y) - 16;
+    short U = ((short)u) - 128;
+    short V = ((short)v) - 128;
+
+    short4 p;
+    p.r = (Y * 298 + V * 409 + 128) >> 8;
+    p.g = (Y * 298 - U * 100 - V * 208 + 128) >> 8;
+    p.b = (Y * 298 + U * 516 + 128) >> 8;
+    p.a = 255;
+    p.r = rsClamp(p.r, (short)0, (short)255);
+    p.g = rsClamp(p.g, (short)0, (short)255);
+    p.b = rsClamp(p.b, (short)0, (short)255);
+
+    return convert_uchar4(p);
+}
+
+static float4 yuv_U_values = {0.f, -0.392f * 0.003921569f, +2.02f * 0.003921569f, 0.f};
+static float4 yuv_V_values = {1.603f * 0.003921569f, -0.815f * 0.003921569f, 0.f, 0.f};
+
+extern float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v) {
+    float4 color = (float)y * 0.003921569f;
+    float4 fU = ((float)u) - 128.f;
+    float4 fV = ((float)v) - 128.f;
+
+    color += fU * yuv_U_values;
+    color += fV * yuv_V_values;
+    color = clamp(color, 0.f, 1.f);
+    return color;
+}
+
+
+/*
+ * half_RECIP
+ */
+
+extern float __attribute__((overloadable)) half_recip(float v) {
+    // FIXME:  actual algorithm for generic approximate reciprocal
+    return 1.f / v;
+}
+
+extern float2 __attribute__((overloadable)) half_recip(float2 v) {
+    float2 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_recip(float3 v) {
+    float3 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_recip(float4 v) {
+    float4 r;
+    r.x = half_recip(v.x);
+    r.y = half_recip(v.y);
+    r.z = half_recip(v.z);
+    r.w = half_recip(v.w);
+    return r;
+}
+
+
+/*
+ * half_SQRT
+ */
+
+extern float __attribute__((overloadable)) half_sqrt(float v) {
+    return sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_sqrt(float2 v) {
+    float2 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_sqrt(float3 v) {
+    float3 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_sqrt(float4 v) {
+    float4 r;
+    r.x = half_sqrt(v.x);
+    r.y = half_sqrt(v.y);
+    r.z = half_sqrt(v.z);
+    r.w = half_sqrt(v.w);
+    return r;
+}
+
+
+/*
+ * half_rsqrt
+ */
+
+extern float __attribute__((overloadable)) half_rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+
+extern float2 __attribute__((overloadable)) half_rsqrt(float2 v) {
+    float2 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) half_rsqrt(float3 v) {
+    float3 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) half_rsqrt(float4 v) {
+    float4 r;
+    r.x = half_rsqrt(v.x);
+    r.y = half_rsqrt(v.y);
+    r.z = half_rsqrt(v.z);
+    r.w = half_rsqrt(v.w);
+    return r;
+}
+
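Note: the fixed-point rsYuvToRGBA_uchar4 path above uses the usual BT.601 limited-range coefficients scaled by 256 (298 is roughly 1.164*256, 409 roughly 1.596*256, and so on). A standalone check of that arithmetic, assuming nothing beyond the C standard library: nominal grey (y=126, u=v=128) should come back as 128 on every channel.

#include <stdio.h>

int main(void) {
    int Y = 126 - 16, U = 128 - 128, V = 128 - 128;
    int r = (Y * 298 + V * 409 + 128) >> 8;
    int g = (Y * 298 - U * 100 - V * 208 + 128) >> 8;
    int b = (Y * 298 + U * 516 + 128) >> 8;
    printf("%d %d %d\n", r, g, b);   /* prints: 128 128 128 */
    return 0;
}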
diff --git a/driver/runtime/arch/x86_math.ll b/driver/runtime/arch/x86_math.ll
new file mode 100755
index 0000000..60add80
--- /dev/null
+++ b/driver/runtime/arch/x86_math.ll
@@ -0,0 +1,40 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
+declare float @llvm.sqrt.f32(float) nounwind readnone
+declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) nounwind readnone
+declare <3 x float> @llvm.sqrt.v3f32(<3 x float>) nounwind readnone
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readnone
+declare float @llvm.exp.f32(float) nounwind readonly
+declare float @llvm.pow.f32(float, float) nounwind readonly
+
+define float @_Z4sqrtf(float %in) nounwind readnone alwaysinline {
+  %1 = tail call float @llvm.sqrt.f32(float %in) nounwind readnone
+  ret float %1
+}
+
+define <2 x float> @_Z4sqrtDv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = tail call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z4sqrtDv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = tail call <3 x float> @llvm.sqrt.v3f32(<3 x float> %in) nounwind readnone
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z4sqrtDv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %in) nounwind readnone
+  ret <4 x float> %1
+}
+
+define float @_Z3expf(float %in) nounwind readnone {
+  %1 = tail call float @llvm.exp.f32(float %in) nounwind readnone
+  ret float %1
+}
+
+define float @_Z3powff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @llvm.pow.f32(float %v1, float %v2) nounwind readnone
+  ret float %1
+}
+
diff --git a/driver/runtime/build_bc_lib.mk b/driver/runtime/build_bc_lib.mk
new file mode 100644
index 0000000..542cd78
--- /dev/null
+++ b/driver/runtime/build_bc_lib.mk
@@ -0,0 +1,75 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+BCC_STRIP_ATTR := $(HOST_OUT_EXECUTABLES)/bcc_strip_attr$(HOST_EXECUTABLE_SUFFIX)
+
+# We need to pass the +long64 flag to the underlying version of Clang, since
+# we are generating a library for use with Renderscript (64-bit long type,
+# not 32-bit).
+bc_clang_cc1_cflags := -target-feature +long64
+bc_translated_clang_cc1_cflags := $(addprefix -Xclang , $(bc_clang_cc1_cflags))
+
+bc_cflags := -MD \
+             -DRS_VERSION=$(RS_VERSION) \
+             -std=c99 \
+             -c \
+             -O3 \
+             -fno-builtin \
+             -emit-llvm \
+             -target armv7-none-linux-gnueabi \
+             -fsigned-char \
+             $(LOCAL_CFLAGS) \
+             $(bc_translated_clang_cc1_cflags)
+
+ifeq ($(rs_debug_runtime),1)
+bc_cflags += -DRS_DEBUG_RUNTIME
+endif
+rs_debug_runtime:=
+
+c_sources := $(filter %.c,$(LOCAL_SRC_FILES))
+ll_sources := $(filter %.ll,$(LOCAL_SRC_FILES))
+
+c_bc_files := $(patsubst %.c,%.bc, \
+    $(addprefix $(intermediates)/, $(c_sources)))
+
+ll_bc_files := $(patsubst %.ll,%.bc, \
+    $(addprefix $(intermediates)/, $(ll_sources)))
+
+$(c_bc_files): PRIVATE_INCLUDES := \
+    frameworks/rs/scriptc \
+    external/clang/lib/Headers
+$(c_bc_files): PRIVATE_CFLAGS := $(bc_cflags)
+
+$(c_bc_files): $(intermediates)/%.bc: $(LOCAL_PATH)/%.c  $(CLANG)
+	@mkdir -p $(dir $@)
+	$(hide) $(CLANG) $(addprefix -I, $(PRIVATE_INCLUDES)) $(PRIVATE_CFLAGS) $< -o $@
+
+$(ll_bc_files): $(intermediates)/%.bc: $(LOCAL_PATH)/%.ll $(LLVM_AS)
+	@mkdir -p $(dir $@)
+	$(hide) $(LLVM_AS) $< -o $@
+
+-include $(c_bc_files:%.bc=%.d)
+-include $(ll_bc_files:%.bc=%.d)
+
+$(LOCAL_BUILT_MODULE): PRIVATE_BC_FILES := $(c_bc_files) $(ll_bc_files)
+$(LOCAL_BUILT_MODULE): $(c_bc_files) $(ll_bc_files)
+$(LOCAL_BUILT_MODULE): $(LLVM_LINK) $(clcore_LLVM_LD)
+$(LOCAL_BUILT_MODULE): $(LLVM_AS) $(BCC_STRIP_ATTR)
+	@mkdir -p $(dir $@)
+	$(hide) $(LLVM_LINK) $(PRIVATE_BC_FILES) -o $@.unstripped
+	$(hide) $(BCC_STRIP_ATTR) -o $@ $@.unstripped
diff --git a/driver/runtime/convert.ll b/driver/runtime/convert.ll
new file mode 100644
index 0000000..f45850d
--- /dev/null
+++ b/driver/runtime/convert.ll
@@ -0,0 +1,731 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FLOAT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <2 x float> @_Z14convert_float2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i8> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i8> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i8> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i8> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i8> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i8> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i16> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i16> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i16> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i16> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i16> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i16> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i32> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i32> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i32> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i32> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i32> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i32> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  ret <2 x float> %in
+}
+
+define <3 x float> @_Z14convert_float3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  ret <3 x float> %in
+}
+
+define <4 x float> @_Z14convert_float4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  ret <4 x float> %in
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  CHAR                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+define <4 x i8> @_Z13convert_char4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  UCHAR                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  SHORT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i16> @_Z14convert_short4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                 USHORT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i16> @_Z15convert_ushort4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                   INT                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i32> @_Z12convert_int4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  UINT                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i32> @_Z13convert_uint4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
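
Read as scalar C, the convert_* pattern above is mechanical: unsigned sources are zero-extended (zext), signed sources are sign-extended (sext), narrowing conversions truncate (trunc), and float sources use fptosi/fptoui, i.e. C's truncating cast. A minimal standalone C sketch of the same rules (illustrative only, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Scalar equivalents of the vector convert_* bodies above. */
static int32_t  convert_int_from_uchar(uint8_t v)  { return (int32_t)v;  }  /* zext   */
static int32_t  convert_int_from_char(int8_t v)    { return (int32_t)v;  }  /* sext   */
static uint16_t convert_ushort_from_int(int32_t v) { return (uint16_t)v; }  /* trunc  */
static int32_t  convert_int_from_float(float v)    { return (int32_t)v;  }  /* fptosi */

int main(void) {
    printf("%d %d %u %d\n",
           convert_int_from_uchar(0xFF),     /* 255           */
           convert_int_from_char(-1),        /* -1            */
           convert_ushort_from_int(0x12345), /* 0x2345 = 9029 */
           convert_int_from_float(-2.7f));   /* -2            */
    return 0;
}
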
diff --git a/driver/runtime/math.ll b/driver/runtime/math.ll
new file mode 100644
index 0000000..f026d15
--- /dev/null
+++ b/driver/runtime/math.ll
@@ -0,0 +1,19 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+declare float @llvm.sqrt.f32(float)
+declare float @llvm.pow.f32(float, float)
+declare float @llvm.fabs.f32(float)
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
+declare <3 x float> @llvm.fabs.v3f32(<3 x float>)
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
+
+define float @_Z4sqrtf(float %v) nounwind readnone alwaysinline {
+  %1 = tail call float @llvm.sqrt.f32(float %v)
+  ret float %1
+}
+
+define float @_Z3powf(float %v1, float %v2) nounwind readnone alwaysinline {
+  %1 = tail call float @llvm.pow.f32(float  %v1, float %v2)
+  ret float %1
+}
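
Under Itanium name mangling, _Z4sqrtf is sqrt(float) and _Z3powf is pow(float, float); each wrapper simply forwards the script-visible overload to the corresponding LLVM intrinsic. A scalar C view of the same forwarding, using libm for illustration only (not part of this patch):

#include <math.h>
#include <stdio.h>

/* Hypothetical scalar stand-ins for the two IR wrappers above. */
static float rs_sqrt(float v)           { return sqrtf(v); }
static float rs_pow(float v1, float v2) { return powf(v1, v2); }

int main(void) {
    printf("%f %f\n", rs_sqrt(2.0f), rs_pow(2.0f, 10.0f)); /* 1.414214 1024.000000 */
    return 0;
}
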
diff --git a/driver/runtime/matrix.ll b/driver/runtime/matrix.ll
new file mode 100644
index 0000000..c56405d
--- /dev/null
+++ b/driver/runtime/matrix.ll
@@ -0,0 +1,176 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
+%struct.rs_matrix4x4 = type { [16 x float] }
+%struct.rs_matrix3x3 = type { [9 x float] }
+%struct.rs_matrix2x2 = type { [4 x float] }
+
+define internal <4 x float> @smear_f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 6
+  %pz2 = bitcast float* %pz to <3 x float>*
+  %zm2 = load <3 x float>* %pz2, align 4
+  %zm = shufflevector <3 x float> %zm2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a4, %a3
+  %a6 = shufflevector <4 x float> %a5, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a6
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyP12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %r = tail call <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind
+  ret <3 x float> %r
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = shufflevector <4 x float> %a3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a4
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyP12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %r = tail call <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind
+  ret <3 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %x0 = extractelement <4 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <4 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <4 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+  %w0 = extractelement <4 x float> %in, i32 3
+  %w = tail call <4 x float> @smear_f(float %w0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a3, %a4
+  %a6 = fmul <4 x float> %w, %wm
+  %a7 = fadd <4 x float> %a5, %a6
+  ret <4 x float> %a7
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  %a5 = fmul <4 x float> %z, %zm
+  %a6 = fadd <4 x float> %a4, %a5
+  ret <4 x float> %a6
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2, align 4
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2, align 4
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2, align 4
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  ret <4 x float> %a4
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
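
In the IR above, the 9 (or 16) floats of the matrix are consumed as columns: the result is x*col0 + y*col1 + z*col2 (+ w*col3), with smear_f broadcasting each input lane. For the float3 overload of the 4x4 multiply, w is an implicit 1.0, which is why the fourth column (wm) is added unscaled. A scalar C reference of the 4x4 * float4 case, as an illustrative sketch only (not part of this patch):

#include <stdio.h>

/* m is 16 floats in column-major order; out = x*col0 + y*col1 + z*col2 + w*col3. */
static void matrix4x4_mul_vec4(const float m[16], const float in[4], float out[4]) {
    for (int row = 0; row < 4; row++) {
        out[row] = in[0] * m[0 * 4 + row]
                 + in[1] * m[1 * 4 + row]
                 + in[2] * m[2 * 4 + row]
                 + in[3] * m[3 * 4 + row];
    }
}

int main(void) {
    /* Identity matrix in column-major layout. */
    float id[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1};
    float v[4] = {1.0f, 2.0f, 3.0f, 1.0f}, r[4];
    matrix4x4_mul_vec4(id, v, r);
    printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); /* 1 2 3 1 */
    return 0;
}
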
diff --git a/driver/runtime/rsClamp.ll b/driver/runtime/rsClamp.ll
new file mode 100644
index 0000000..eba678a
--- /dev/null
+++ b/driver/runtime/rsClamp.ll
@@ -0,0 +1,60 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
+define float @_Z7rsClampfff(float %value, float %low, float %high) nounwind readonly {
+  %1 = fcmp olt float %value, %high
+  %2 = select i1 %1, float %value, float %high
+  %3 = fcmp ogt float %2, %low
+  %4 = select i1 %3, float %2, float %low
+  ret float %4
+}
+
+define signext i8 @_Z7rsClampccc(i8 signext %value, i8 signext %low, i8 signext %high) nounwind readonly {
+  %1 = icmp slt i8 %value, %high
+  %2 = select i1 %1, i8 %value, i8 %high
+  %3 = icmp sgt i8 %2, %low
+  %4 = select i1 %3, i8 %2, i8 %low
+  ret i8 %4
+}
+
+define zeroext i8 @_Z7rsClamphhh(i8 zeroext %value, i8 zeroext %low, i8 zeroext %high) nounwind readonly {
+  %1 = icmp ult i8 %value, %high
+  %2 = select i1 %1, i8 %value, i8 %high
+  %3 = icmp ugt i8 %2, %low
+  %4 = select i1 %3, i8 %2, i8 %low
+  ret i8 %4
+}
+
+define signext i16 @_Z7rsClampsss(i16 signext %value, i16 signext %low, i16 signext %high) nounwind readonly {
+  %1 = icmp slt i16 %value, %high
+  %2 = select i1 %1, i16 %value, i16 %high
+  %3 = icmp sgt i16 %2, %low
+  %4 = select i1 %3, i16 %2, i16 %low
+  ret i16 %4
+}
+
+define zeroext i16 @_Z7rsClampttt(i16 zeroext %value, i16 zeroext %low, i16 zeroext %high) nounwind readonly {
+  %1 = icmp ult i16 %value, %high
+  %2 = select i1 %1, i16 %value, i16 %high
+  %3 = icmp ugt i16 %2, %low
+  %4 = select i1 %3, i16 %2, i16 %low
+  ret i16 %4
+}
+
+define i32 @_Z7rsClampiii(i32 %value, i32 %low, i32 %high) nounwind readonly {
+  %1 = icmp slt i32 %value, %high
+  %2 = select i1 %1, i32 %value, i32 %high
+  %3 = icmp sgt i32 %2, %low
+  %4 = select i1 %3, i32 %2, i32 %low
+  ret i32 %4
+}
+
+define i32 @_Z7rsClampjjj(i32 %value, i32 %low, i32 %high) nounwind readonly {
+  %1 = icmp ult i32 %value, %high
+  %2 = select i1 %1, i32 %value, i32 %high
+  %3 = icmp ugt i32 %2, %low
+  %4 = select i1 %3, i32 %2, i32 %low
+  ret i32 %4
+}
+
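
Every rsClamp variant above follows the same two-step compare/select shape: take the smaller of (value, high), then the larger of that and low; the signed and unsigned overloads differ only in the comparison predicate (slt/sgt vs ult/ugt). A scalar C equivalent of the float variant, as an illustrative sketch only (not part of this patch):

#include <stdio.h>

static float rs_clamp_f(float value, float low, float high) {
    float v = (value < high) ? value : high;   /* fcmp olt + select */
    return (v > low) ? v : low;                /* fcmp ogt + select */
}

int main(void) {
    printf("%g %g %g\n",
           rs_clamp_f(0.5f, 0.0f, 1.0f),   /* 0.5 */
           rs_clamp_f(-2.0f, 0.0f, 1.0f),  /* 0   */
           rs_clamp_f(9.0f, 0.0f, 1.0f));  /* 1   */
    return 0;
}
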
diff --git a/driver/runtime/rs_allocation.c b/driver/runtime/rs_allocation.c
new file mode 100644
index 0000000..1d0f5b6
--- /dev/null
+++ b/driver/runtime/rs_allocation.c
@@ -0,0 +1,310 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+// Opaque Allocation type operations
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimX(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.drvState.lod[0].dimX;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimY(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.drvState.lod[0].dimY;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimZ(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.drvState.lod[0].dimZ;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimLOD(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.hasMipmaps;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimFaces(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.hasFaces;
+}
+
+
+extern rs_element __attribute__((overloadable))
+        rsAllocationGetElement(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    if (alloc == NULL) {
+        rs_element nullElem = {0};
+        return nullElem;
+    }
+    Type_t *type = (Type_t *)alloc->mHal.state.type;
+    rs_element returnElem = {type->mHal.state.element};
+    return returnElem;
+}
+
+// TODO: this needs to be optimized, obviously
+static void memcpy(void* dst, void* src, size_t size) {
+    char* dst_c = (char*) dst, *src_c = (char*) src;
+    for (; size > 0; size--) {
+        *dst_c++ = *src_c++;
+    }
+}
+
+#ifdef RS_DEBUG_RUNTIME
+#define ELEMENT_AT(T)                                                   \
+    extern void __attribute__((overloadable))                           \
+        rsSetElementAt_##T(rs_allocation a, const T *val, uint32_t x);  \
+    extern void __attribute__((overloadable))                           \
+        rsSetElementAt_##T(rs_allocation a, const T *val, uint32_t x, uint32_t y); \
+    extern void __attribute__((overloadable))                           \
+        rsSetElementAt_##T(rs_allocation a, const T *val, uint32_t x, uint32_t y, uint32_t z); \
+    extern void __attribute__((overloadable))                           \
+        rsGetElementAt_##T(rs_allocation a, T *val, uint32_t x);  \
+    extern void __attribute__((overloadable))                           \
+        rsGetElementAt_##T(rs_allocation a, T *val, uint32_t x, uint32_t y); \
+    extern void __attribute__((overloadable))                           \
+        rsGetElementAt_##T(rs_allocation a, T *val, uint32_t x, uint32_t y, uint32_t z); \
+                                                                        \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x) {            \
+        rsSetElementAt_##T(a, &val, x);                                 \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y) { \
+        rsSetElementAt_##T(a, &val, x, y);                              \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y, uint32_t z) { \
+        rsSetElementAt_##T(a, &val, x, y, z);                           \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x) {                   \
+        T tmp;                                                          \
+        rsGetElementAt_##T(a, &tmp, x);                                 \
+        return tmp;                                                     \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y) {       \
+        T tmp;                                                          \
+        rsGetElementAt_##T(a, &tmp, x, y);                              \
+        return tmp;                                                     \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) { \
+        T tmp;                                                          \
+        rsGetElementAt_##T(a, &tmp, x, y, z);                           \
+        return tmp;                                                     \
+    }
+
+#else
+#define ELEMENT_AT(T)                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x) {            \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        uint8_t *p = (uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t eSize = sizeof(T);                               \
+        *((T*)&p[(eSize * x)]) = val;                                   \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y) { \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        uint8_t *p = (uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t eSize = sizeof(T);                               \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        *((T*)&p[(eSize * x) + (y * stride)]) = val;                    \
+    }                                                                   \
+    extern void __attribute__((overloadable))                           \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y, uint32_t z) { \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        uint8_t *p = (uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;         \
+        uint8_t *dp = &p[(sizeof(T) * x) + (y * stride) + (z * stride * dimY)]; \
+        ((T*)dp)[0] = val;                                        \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x) {                   \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        return *((T*)&p[(sizeof(T) * x)]);                              \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y) {       \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        return *((T*)&p[(sizeof(T) * x) + (y * stride)]);               \
+    }                                                                   \
+    extern T __attribute__((overloadable))                              \
+    rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) { \
+        Allocation_t *alloc = (Allocation_t *)a.p;                      \
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr; \
+        const uint32_t stride = alloc->mHal.drvState.lod[0].stride;     \
+        const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;         \
+        const uint8_t *dp = &p[(sizeof(T) * x) + (y * stride) + (z * stride * dimY)]; \
+        return ((const T*)dp)[0];                                       \
+    }
+
+
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    return &p[eSize * x];
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    return &p[(eSize * x) + (y * stride)];
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;
+    return &p[(eSize * x) + (y * stride) + (z * stride * dimY)];
+}
+extern void __attribute__((overloadable))
+        rsSetElementAt(rs_allocation a, void* ptr, uint32_t x) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    memcpy((void*)&p[eSize * x], ptr, eSize);
+}
+
+extern void __attribute__((overloadable))
+        rsSetElementAt(rs_allocation a, void* ptr, uint32_t x, uint32_t y) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    memcpy((void*)&p[(eSize * x) + (y * stride)], ptr, eSize);
+}
+
+extern void __attribute__((overloadable))
+        rsSetElementAt(rs_allocation a, void* ptr, uint32_t x, uint32_t y, uint32_t z) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.lod[0].stride;
+    const uint32_t dimY = alloc->mHal.drvState.lod[0].dimY;
+    memcpy((void*)&p[(eSize * x) + (y * stride) + (z * stride * dimY)], ptr, eSize);
+}
+#endif
+
+ELEMENT_AT(char)
+ELEMENT_AT(char2)
+ELEMENT_AT(char3)
+ELEMENT_AT(char4)
+ELEMENT_AT(uchar)
+ELEMENT_AT(uchar2)
+ELEMENT_AT(uchar3)
+ELEMENT_AT(uchar4)
+ELEMENT_AT(short)
+ELEMENT_AT(short2)
+ELEMENT_AT(short3)
+ELEMENT_AT(short4)
+ELEMENT_AT(ushort)
+ELEMENT_AT(ushort2)
+ELEMENT_AT(ushort3)
+ELEMENT_AT(ushort4)
+ELEMENT_AT(int)
+ELEMENT_AT(int2)
+ELEMENT_AT(int3)
+ELEMENT_AT(int4)
+ELEMENT_AT(uint)
+ELEMENT_AT(uint2)
+ELEMENT_AT(uint3)
+ELEMENT_AT(uint4)
+ELEMENT_AT(long)
+ELEMENT_AT(long2)
+ELEMENT_AT(long3)
+ELEMENT_AT(long4)
+ELEMENT_AT(ulong)
+ELEMENT_AT(ulong2)
+ELEMENT_AT(ulong3)
+ELEMENT_AT(ulong4)
+ELEMENT_AT(float)
+ELEMENT_AT(float2)
+ELEMENT_AT(float3)
+ELEMENT_AT(float4)
+ELEMENT_AT(double)
+ELEMENT_AT(double2)
+ELEMENT_AT(double3)
+ELEMENT_AT(double4)
+
+#undef ELEMENT_AT
+
+
+extern const uchar __attribute__((overloadable))
+        rsGetElementAtYuv_uchar_Y(rs_allocation a, uint32_t x, uint32_t y) {
+    return rsGetElementAt_uchar(a, x, y);
+}
+
+extern const uchar __attribute__((overloadable))
+        rsGetElementAtYuv_uchar_U(rs_allocation a, uint32_t x, uint32_t y) {
+
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint32_t yuvID = alloc->mHal.state.yuv;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[1].mallocPtr;
+    const uint32_t stride = alloc->mHal.drvState.lod[1].stride;
+
+    switch(yuvID) {
+    case 0x32315659: //HAL_PIXEL_FORMAT_YV12:
+        x >>= 1;
+        y >>= 1;
+        return p[x + (y * stride)];
+    case 11: //HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
+        x >>= 1;
+        y >>= 1;
+        return p[(x<<1) + (y * stride)];
+    default:
+        break;
+    }
+
+    return 0;
+}
+
+extern const uchar __attribute__((overloadable))
+        rsGetElementAtYuv_uchar_V(rs_allocation a, uint32_t x, uint32_t y) {
+
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint32_t yuvID = alloc->mHal.state.yuv;
+
+    switch(yuvID) {
+    case 0x32315659: //HAL_PIXEL_FORMAT_YV12:
+        {
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[2].mallocPtr;
+        const uint32_t stride = alloc->mHal.drvState.lod[2].stride;
+        x >>= 1;
+        y >>= 1;
+        return p[x + (y * stride)];
+        }
+    case 11: //HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
+        {
+        const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[1].mallocPtr;
+        const uint32_t stride = alloc->mHal.drvState.lod[1].stride;
+        x >>= 1;
+        y >>= 1;
+        return p[(x<<1) + (y * stride) + 1];
+        }
+    default:
+        break;
+    }
+
+    return 0;
+}
+
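
The ELEMENT_AT accessors above all compute the same byte offset from the LOD 0 base pointer: elementSize * x + y * stride + z * stride * dimY. The YUV getters apply the same idea to the LOD 1/2 chroma planes, halving x and y because chroma is subsampled 2x in each dimension. A standalone C sketch of the addressing arithmetic; the struct fields here are illustrative stand-ins, not the real Allocation_t layout from rs_structs.h (not part of this patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct alloc_sketch {
    uint8_t *base;     /* mHal.drvState.lod[0].mallocPtr */
    size_t   eSize;    /* mHal.state.elementSizeBytes    */
    size_t   stride;   /* mHal.drvState.lod[0].stride    */
    size_t   dimY;     /* mHal.drvState.lod[0].dimY      */
};

/* Byte address of the 3D cell (x, y, z). */
static void *element_addr(const struct alloc_sketch *a,
                          uint32_t x, uint32_t y, uint32_t z) {
    return a->base + a->eSize * x + (size_t)y * a->stride
                   + (size_t)z * a->stride * a->dimY;
}

int main(void) {
    uint8_t storage[4 * 4 * 2 * 4];                 /* 4x4x2 grid of 4-byte cells */
    struct alloc_sketch a = { storage, 4, 16, 4 };
    printf("offset of (1,2,1) = %td\n",
           (uint8_t *)element_addr(&a, 1, 2, 1) - storage);  /* 4 + 32 + 64 = 100 */
    return 0;
}
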
diff --git a/driver/runtime/rs_cl.c b/driver/runtime/rs_cl.c
new file mode 100755
index 0000000..b7f9158
--- /dev/null
+++ b/driver/runtime/rs_cl.c
@@ -0,0 +1,1194 @@
+#include "rs_types.rsh"
+
+extern float2 __attribute__((overloadable)) convert_float2(int2 c);
+extern float3 __attribute__((overloadable)) convert_float3(int3 c);
+extern float4 __attribute__((overloadable)) convert_float4(int4 c);
+
+extern int2 __attribute__((overloadable)) convert_int2(float2 c);
+extern int3 __attribute__((overloadable)) convert_int3(float3 c);
+extern int4 __attribute__((overloadable)) convert_int4(float4 c);
+
+
+extern float __attribute__((overloadable)) fmin(float v, float v2);
+extern float2 __attribute__((overloadable)) fmin(float2 v, float v2);
+extern float3 __attribute__((overloadable)) fmin(float3 v, float v2);
+extern float4 __attribute__((overloadable)) fmin(float4 v, float v2);
+
+extern float __attribute__((overloadable)) fmax(float v, float v2);
+extern float2 __attribute__((overloadable)) fmax(float2 v, float v2);
+extern float3 __attribute__((overloadable)) fmax(float3 v, float v2);
+extern float4 __attribute__((overloadable)) fmax(float4 v, float v2);
+
+// Float ops, 6.11.2
+
+#define FN_FUNC_FN(fnc)                                         \
+extern float2 __attribute__((overloadable)) fnc(float2 v) { \
+    float2 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern float3 __attribute__((overloadable)) fnc(float3 v) { \
+    float3 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern float4 __attribute__((overloadable)) fnc(float4 v) { \
+    float4 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+#define IN_FUNC_FN(fnc)                                         \
+extern int2 __attribute__((overloadable)) fnc(float2 v) {   \
+    int2 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern int3 __attribute__((overloadable)) fnc(float3 v) {   \
+    int3 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern int4 __attribute__((overloadable)) fnc(float4 v) {   \
+    int4 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+#define FN_FUNC_FN_FN(fnc)                                                  \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, float2 v2) { \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, float3 v2) { \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, float4 v2) { \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    r.w = fnc(v1.w, v2.w);                                                  \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_F(fnc)                                                   \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, float v2) {  \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, float v2) {  \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, float v2) {  \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    r.w = fnc(v1.w, v2);                                                    \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_IN(fnc)                                                  \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int2 v2) {   \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int3 v2) {   \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int4 v2) {   \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    r.w = fnc(v1.w, v2.w);                                                  \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_I(fnc)                                                   \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int v2) {    \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int v2) {    \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int v2) {    \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    r.w = fnc(v1.w, v2);                                                    \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_PFN(fnc)                     \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 *v2) {            \
+    float2 r;                                   \
+    float t[2];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 *v2) {            \
+    float3 r;                                   \
+    float t[3];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    r.z = fnc(v1.z, &t[2]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    v2->z = t[2];                               \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 *v2) {            \
+    float4 r;                                   \
+    float t[4];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    r.z = fnc(v1.z, &t[2]);                     \
+    r.w = fnc(v1.w, &t[3]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    v2->z = t[2];                               \
+    v2->w = t[3];                               \
+    return r;                                   \
+}
+
+#define FN_FUNC_FN_PIN(fnc)                                                 \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int2 *v2) {  \
+    float2 r;                                                               \
+    int t[2];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int3 *v2) {  \
+    float3 r;                                                               \
+    int t[3];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    r.z = fnc(v1.z, &t[2]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    v2->z = t[2];                                                           \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int4 *v2) {  \
+    float4 r;                                                               \
+    int t[4];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    r.z = fnc(v1.z, &t[2]);                                                 \
+    r.w = fnc(v1.w, &t[3]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    v2->z = t[2];                                                           \
+    v2->w = t[3];                                                           \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_FN_FN(fnc)                   \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 v2, float2 v3) {  \
+    float2 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 v2, float3 v3) {  \
+    float3 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    r.z = fnc(v1.z, v2.z, v3.z);                \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 v2, float4 v3) {  \
+    float4 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    r.z = fnc(v1.z, v2.z, v3.z);                \
+    r.w = fnc(v1.w, v2.w, v3.w);                \
+    return r;                                   \
+}
+
+#define FN_FUNC_FN_FN_PIN(fnc)                  \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 v2, int2 *v3) {   \
+    float2 r;                                   \
+    int t[2];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 v2, int3 *v3) {   \
+    float3 r;                                   \
+    int t[3];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    r.z = fnc(v1.z, v2.z, &t[2]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    v3->z = t[2];                               \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 v2, int4 *v3) {   \
+    float4 r;                                   \
+    int t[4];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    r.z = fnc(v1.z, v2.z, &t[2]);               \
+    r.w = fnc(v1.w, v2.w, &t[3]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    v3->z = t[2];                               \
+    v3->w = t[3];                               \
+    return r;                                   \
+}
+
+static const int iposinf = 0x7f800000;
+static const int ineginf = 0xff800000;
+
+static float posinf() {
+    float f = *((float*)&iposinf);
+    return f;
+}
+
+static float neginf() {
+    float f = *((float*)&ineginf);
+    return f;
+}
+
+static bool isinf(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == iposinf) || (i == ineginf);
+}
+
+static bool isnan(float f) {
+    int i = *((int*)(void*)&f);
+    return (((i & 0x7f800000) == 0x7f800000) && (i & 0x007fffff));
+}
+
+static bool isposzero(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == 0x00000000);
+}
+
+static bool isnegzero(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == 0x80000000);
+}
+
+static bool iszero(float f) {
+    return isposzero(f) || isnegzero(f);
+}
+
+
+extern float __attribute__((overloadable)) acos(float);
+FN_FUNC_FN(acos)
+
+extern float __attribute__((overloadable)) acosh(float);
+FN_FUNC_FN(acosh)
+
+
+extern float __attribute__((overloadable)) acospi(float v) {
+    return acos(v) / M_PI;
+}
+FN_FUNC_FN(acospi)
+
+extern float __attribute__((overloadable)) asin(float);
+FN_FUNC_FN(asin)
+
+extern float __attribute__((overloadable)) asinh(float);
+FN_FUNC_FN(asinh)
+
+extern float __attribute__((overloadable)) asinpi(float v) {
+    return asin(v) / M_PI;
+}
+FN_FUNC_FN(asinpi)
+
+extern float __attribute__((overloadable)) atan(float);
+FN_FUNC_FN(atan)
+
+extern float __attribute__((overloadable)) atan2(float, float);
+FN_FUNC_FN_FN(atan2)
+
+extern float __attribute__((overloadable)) atanh(float);
+FN_FUNC_FN(atanh)
+
+extern float __attribute__((overloadable)) atanpi(float v) {
+    return atan(v) / M_PI;
+}
+FN_FUNC_FN(atanpi)
+
+
+extern float __attribute__((overloadable)) atan2pi(float y, float x) {
+    return atan2(y, x) / M_PI;
+}
+FN_FUNC_FN_FN(atan2pi)
+
+extern float __attribute__((overloadable)) cbrt(float);
+FN_FUNC_FN(cbrt)
+
+extern float __attribute__((overloadable)) ceil(float);
+FN_FUNC_FN(ceil)
+
+extern float __attribute__((overloadable)) copysign(float, float);
+FN_FUNC_FN_FN(copysign)
+
+extern float __attribute__((overloadable)) cos(float);
+FN_FUNC_FN(cos)
+
+extern float __attribute__((overloadable)) cosh(float);
+FN_FUNC_FN(cosh)
+
+extern float __attribute__((overloadable)) cospi(float v) {
+    return cos(v * M_PI);
+}
+FN_FUNC_FN(cospi)
+
+extern float __attribute__((overloadable)) erfc(float);
+FN_FUNC_FN(erfc)
+
+extern float __attribute__((overloadable)) erf(float);
+FN_FUNC_FN(erf)
+
+extern float __attribute__((overloadable)) exp(float);
+FN_FUNC_FN(exp)
+
+extern float __attribute__((overloadable)) exp2(float);
+FN_FUNC_FN(exp2)
+
+extern float __attribute__((overloadable)) pow(float, float);
+
+extern float __attribute__((overloadable)) exp10(float v) {
+    return exp2(v * 3.321928095f);
+}
+FN_FUNC_FN(exp10)
+
+extern float __attribute__((overloadable)) expm1(float);
+FN_FUNC_FN(expm1)
+
+extern float __attribute__((overloadable)) fabs(float v) {
+    int i = *((int*)(void*)&v) & 0x7fffffff;
+    return  *((float*)(void*)&i);
+}
+FN_FUNC_FN(fabs)
+
+extern float __attribute__((overloadable)) fdim(float, float);
+FN_FUNC_FN_FN(fdim)
+
+extern float __attribute__((overloadable)) floor(float);
+FN_FUNC_FN(floor)
+
+extern float __attribute__((overloadable)) fma(float, float, float);
+FN_FUNC_FN_FN_FN(fma)
+
+extern float __attribute__((overloadable)) fmin(float, float);
+
+extern float __attribute__((overloadable)) fmod(float, float);
+FN_FUNC_FN_FN(fmod)
+
+extern float __attribute__((overloadable)) fract(float v, float *iptr) {
+    int i = (int)floor(v);
+    if (iptr) {
+        iptr[0] = i;
+    }
+    return fmin(v - i, 0x1.fffffep-1f);
+}
+FN_FUNC_FN_PFN(fract)
+
+extern float __attribute__((overloadable)) frexp(float, int *);
+FN_FUNC_FN_PIN(frexp)
+
+extern float __attribute__((overloadable)) hypot(float, float);
+FN_FUNC_FN_FN(hypot)
+
+extern int __attribute__((overloadable)) ilogb(float);
+IN_FUNC_FN(ilogb)
+
+extern float __attribute__((overloadable)) ldexp(float, int);
+FN_FUNC_FN_IN(ldexp)
+FN_FUNC_FN_I(ldexp)
+
+extern float __attribute__((overloadable)) lgamma(float);
+FN_FUNC_FN(lgamma)
+extern float __attribute__((overloadable)) lgamma(float, int*);
+FN_FUNC_FN_PIN(lgamma)
+
+extern float __attribute__((overloadable)) log(float);
+FN_FUNC_FN(log)
+
+extern float __attribute__((overloadable)) log10(float);
+FN_FUNC_FN(log10)
+
+
+extern float __attribute__((overloadable)) log2(float v) {
+    return log10(v) * 3.321928095f;
+}
+FN_FUNC_FN(log2)
+
+extern float __attribute__((overloadable)) log1p(float);
+FN_FUNC_FN(log1p)
+
+extern float __attribute__((overloadable)) logb(float);
+FN_FUNC_FN(logb)
+
+extern float __attribute__((overloadable)) mad(float a, float b, float c) {
+    return a * b + c;
+}
+extern float2 __attribute__((overloadable)) mad(float2 a, float2 b, float2 c) {
+    return a * b + c;
+}
+extern float3 __attribute__((overloadable)) mad(float3 a, float3 b, float3 c) {
+    return a * b + c;
+}
+extern float4 __attribute__((overloadable)) mad(float4 a, float4 b, float4 c) {
+    return a * b + c;
+}
+
+extern float __attribute__((overloadable)) modf(float, float *);
+FN_FUNC_FN_PFN(modf)
+
+extern float __attribute__((overloadable)) nan(uint v) {
+    float f[1];
+    uint32_t *ip = (uint32_t *)f;
+    *ip = v | 0x7fc00000;
+    return f[0];
+}
+
+extern float __attribute__((overloadable)) nextafter(float, float);
+FN_FUNC_FN_FN(nextafter)
+
+FN_FUNC_FN_FN(pow)
+
+extern float __attribute__((overloadable)) pown(float v, int p) {
+    return pow(v, (float)p);
+}
+extern float2 __attribute__((overloadable)) pown(float2 v, int2 p) {
+    float2 f2 = convert_float2(p);
+    return pow(v, f2);
+}
+extern float3 __attribute__((overloadable)) pown(float3 v, int3 p) {
+    float3 f3 = convert_float3(p);
+    return pow(v, f3);
+}
+extern float4 __attribute__((overloadable)) pown(float4 v, int4 p) {
+    float4 f4 = convert_float4(p);
+    return pow(v, f4);
+}
+
+extern float __attribute__((overloadable)) powr(float v, float p) {
+    return pow(v, p);
+}
+extern float2 __attribute__((overloadable)) powr(float2 v, float2 p) {
+    return pow(v, p);
+}
+extern float3 __attribute__((overloadable)) powr(float3 v, float3 p) {
+    return pow(v, p);
+}
+extern float4 __attribute__((overloadable)) powr(float4 v, float4 p) {
+    return pow(v, p);
+}
+
+extern float __attribute__((overloadable)) remainder(float, float);
+FN_FUNC_FN_FN(remainder)
+
+extern float __attribute__((overloadable)) remquo(float, float, int *);
+FN_FUNC_FN_FN_PIN(remquo)
+
+extern float __attribute__((overloadable)) rint(float);
+FN_FUNC_FN(rint)
+
+extern float __attribute__((overloadable)) rootn(float v, int r) {
+    if (r == 0) {
+        return nan(0);
+    }
+
+    if (iszero(v)) {
+        if (r < 0) {
+            if (r & 1) {
+                return copysign(posinf(), v);
+            } else {
+                return posinf();
+            }
+        } else {
+            if (r & 1) {
+                return copysign(0.f, v);
+            } else {
+                return 0.f;
+            }
+        }
+    }
+
+    if (!isinf(v) && !isnan(v) && (v < 0.f)) {
+        if (r & 1) {
+            return (-1.f * pow(-1.f * v, 1.f / r));
+        } else {
+            return nan(0);
+        }
+    }
+
+    return pow(v, 1.f / r);
+}
+FN_FUNC_FN_IN(rootn)
+
+extern float __attribute__((overloadable)) round(float);
+FN_FUNC_FN(round)
+
+
+extern float __attribute__((overloadable)) sqrt(float);
+extern float __attribute__((overloadable)) rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+FN_FUNC_FN(rsqrt)
+
+extern float __attribute__((overloadable)) sin(float);
+FN_FUNC_FN(sin)
+
+extern float __attribute__((overloadable)) sincos(float v, float *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float2 __attribute__((overloadable)) sincos(float2 v, float2 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float3 __attribute__((overloadable)) sincos(float3 v, float3 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float4 __attribute__((overloadable)) sincos(float4 v, float4 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+
+extern float __attribute__((overloadable)) sinh(float);
+FN_FUNC_FN(sinh)
+
+extern float __attribute__((overloadable)) sinpi(float v) {
+    return sin(v * M_PI);
+}
+FN_FUNC_FN(sinpi)
+
+extern float __attribute__((overloadable)) tan(float);
+FN_FUNC_FN(tan)
+
+extern float __attribute__((overloadable)) tanh(float);
+FN_FUNC_FN(tanh)
+
+extern float __attribute__((overloadable)) tanpi(float v) {
+    return tan(v * M_PI);
+}
+FN_FUNC_FN(tanpi)
+
+
+extern float __attribute__((overloadable)) tgamma(float);
+FN_FUNC_FN(tgamma)
+
+extern float __attribute__((overloadable)) trunc(float);
+FN_FUNC_FN(trunc)
+
+// Int ops (partial), 6.11.3
+
+#define XN_FUNC_YN(typeout, fnc, typein)                                \
+extern typeout __attribute__((overloadable)) fnc(typein);               \
+extern typeout##2 __attribute__((overloadable)) fnc(typein##2 v) {  \
+    typeout##2 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    return r;                                                           \
+}                                                                       \
+extern typeout##3 __attribute__((overloadable)) fnc(typein##3 v) {  \
+    typeout##3 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    r.z = fnc(v.z);                                                     \
+    return r;                                                           \
+}                                                                       \
+extern typeout##4 __attribute__((overloadable)) fnc(typein##4 v) {  \
+    typeout##4 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    r.z = fnc(v.z);                                                     \
+    r.w = fnc(v.w);                                                     \
+    return r;                                                           \
+}
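+// XN_FUNC_YN declares the scalar builtin and expands it component-wise to the
+// 2-, 3- and 4-element vector types.  For example, XN_FUNC_YN(uchar, abs, char)
+// declares uchar abs(char) and defines uchar2/3/4 abs(char2/3/4).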
+
+
+#define UIN_FUNC_IN(fnc)          \
+XN_FUNC_YN(uchar, fnc, char)      \
+XN_FUNC_YN(ushort, fnc, short)    \
+XN_FUNC_YN(uint, fnc, int)
+
+#define IN_FUNC_IN(fnc)           \
+XN_FUNC_YN(uchar, fnc, uchar)     \
+XN_FUNC_YN(char, fnc, char)       \
+XN_FUNC_YN(ushort, fnc, ushort)   \
+XN_FUNC_YN(short, fnc, short)     \
+XN_FUNC_YN(uint, fnc, uint)       \
+XN_FUNC_YN(int, fnc, int)
+
+
+#define XN_FUNC_XN_XN_BODY(type, fnc, body)         \
+extern type __attribute__((overloadable))       \
+        fnc(type v1, type v2) {                     \
+    return body;                                    \
+}                                                   \
+extern type##2 __attribute__((overloadable))    \
+        fnc(type##2 v1, type##2 v2) {               \
+    type##2 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    return r;                                       \
+}                                                   \
+extern type##3 __attribute__((overloadable))    \
+        fnc(type##3 v1, type##3 v2) {               \
+    type##3 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    r.z = fnc(v1.z, v2.z);                          \
+    return r;                                       \
+}                                                   \
+extern type##4 __attribute__((overloadable))    \
+        fnc(type##4 v1, type##4 v2) {               \
+    type##4 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    r.z = fnc(v1.z, v2.z);                          \
+    r.w = fnc(v1.w, v2.w);                          \
+    return r;                                       \
+}
+
+#define IN_FUNC_IN_IN_BODY(fnc, body) \
+XN_FUNC_XN_XN_BODY(uchar, fnc, body)  \
+XN_FUNC_XN_XN_BODY(char, fnc, body)   \
+XN_FUNC_XN_XN_BODY(ushort, fnc, body) \
+XN_FUNC_XN_XN_BODY(short, fnc, body)  \
+XN_FUNC_XN_XN_BODY(uint, fnc, body)   \
+XN_FUNC_XN_XN_BODY(int, fnc, body)    \
+XN_FUNC_XN_XN_BODY(float, fnc, body)
+
+
+/**
+ * abs
+ */
+extern uint32_t __attribute__((overloadable)) abs(int32_t v) {
+    if (v < 0)
+        return -v;
+    return v;
+}
+extern uint16_t __attribute__((overloadable)) abs(int16_t v) {
+    if (v < 0)
+        return -v;
+    return v;
+}
+extern uint8_t __attribute__((overloadable)) abs(int8_t v) {
+    if (v < 0)
+        return -v;
+    return v;
+}
+
+/**
+ * clz
+ */
+extern uint32_t __attribute__((overloadable)) clz(uint32_t v) {
+    return __builtin_clz(v);
+}
+extern uint16_t __attribute__((overloadable)) clz(uint16_t v) {
+    return (uint16_t)__builtin_clz(v);
+}
+extern uint8_t __attribute__((overloadable)) clz(uint8_t v) {
+    return (uint8_t)__builtin_clz(v);
+}
+extern int32_t __attribute__((overloadable)) clz(int32_t v) {
+    return (int32_t)__builtin_clz((uint32_t)v);
+}
+extern int16_t __attribute__((overloadable)) clz(int16_t v) {
+    return (int16_t)__builtin_clz(v);
+}
+extern int8_t __attribute__((overloadable)) clz(int8_t v) {
+    return (int8_t)__builtin_clz(v);
+}
+
+
+UIN_FUNC_IN(abs)
+IN_FUNC_IN(clz)
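+// The two invocations above generate the vector overloads: abs() maps each
+// signed integer vector to its unsigned counterpart, and clz() maps every
+// integer vector type to itself, both delegating to the scalar versions above.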
+
+
+// 6.11.4
+
+
+extern float __attribute__((overloadable)) degrees(float radians) {
+    return radians * (180.f / M_PI);
+}
+extern float2 __attribute__((overloadable)) degrees(float2 radians) {
+    return radians * (180.f / M_PI);
+}
+extern float3 __attribute__((overloadable)) degrees(float3 radians) {
+    return radians * (180.f / M_PI);
+}
+extern float4 __attribute__((overloadable)) degrees(float4 radians) {
+    return radians * (180.f / M_PI);
+}
+
+extern float __attribute__((overloadable)) mix(float start, float stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float2 amount) {
+    return start + (stop - start) * amount;
+}
+extern float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float3 amount) {
+    return start + (stop - start) * amount;
+}
+extern float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float4 amount) {
+    return start + (stop - start) * amount;
+}
+extern float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+
+extern float __attribute__((overloadable)) radians(float degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float2 __attribute__((overloadable)) radians(float2 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float3 __attribute__((overloadable)) radians(float3 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float4 __attribute__((overloadable)) radians(float4 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+
+extern float __attribute__((overloadable)) step(float edge, float v) {
+    return (v < edge) ? 0.f : 1.f;
+}
+extern float2 __attribute__((overloadable)) step(float2 edge, float2 v) {
+    float2 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    return r;
+}
+extern float3 __attribute__((overloadable)) step(float3 edge, float3 v) {
+    float3 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    r.z = (v.z < edge.z) ? 0.f : 1.f;
+    return r;
+}
+extern float4 __attribute__((overloadable)) step(float4 edge, float4 v) {
+    float4 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    r.z = (v.z < edge.z) ? 0.f : 1.f;
+    r.w = (v.w < edge.w) ? 0.f : 1.f;
+    return r;
+}
+extern float2 __attribute__((overloadable)) step(float2 edge, float v) {
+    float2 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    return r;
+}
+extern float3 __attribute__((overloadable)) step(float3 edge, float v) {
+    float3 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    r.z = (v < edge.z) ? 0.f : 1.f;
+    return r;
+}
+extern float4 __attribute__((overloadable)) step(float4 edge, float v) {
+    float4 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    r.z = (v < edge.z) ? 0.f : 1.f;
+    r.w = (v < edge.w) ? 0.f : 1.f;
+    return r;
+}
+
+extern float __attribute__((overloadable)) smoothstep(float, float, float);
+extern float2 __attribute__((overloadable)) smoothstep(float2, float2, float2);
+extern float3 __attribute__((overloadable)) smoothstep(float3, float3, float3);
+extern float4 __attribute__((overloadable)) smoothstep(float4, float4, float4);
+extern float2 __attribute__((overloadable)) smoothstep(float, float, float2);
+extern float3 __attribute__((overloadable)) smoothstep(float, float, float3);
+extern float4 __attribute__((overloadable)) smoothstep(float, float, float4);
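+// smoothstep() is only declared here and provided elsewhere in the runtime;
+// the conventional definition is t = clamp((v - edge0) / (edge1 - edge0), 0, 1)
+// followed by t * t * (3 - 2 * t), applied per component.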
+
+extern float __attribute__((overloadable)) sign(float v) {
+    if (v > 0) return 1.f;
+    if (v < 0) return -1.f;
+    return v;
+}
+FN_FUNC_FN(sign)
+
+
+// 6.11.5
+extern float3 __attribute__((overloadable)) cross(float3 lhs, float3 rhs) {
+    float3 r;
+    r.x = lhs.y * rhs.z  - lhs.z * rhs.y;
+    r.y = lhs.z * rhs.x  - lhs.x * rhs.z;
+    r.z = lhs.x * rhs.y  - lhs.y * rhs.x;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) cross(float4 lhs, float4 rhs) {
+    float4 r;
+    r.x = lhs.y * rhs.z  - lhs.z * rhs.y;
+    r.y = lhs.z * rhs.x  - lhs.x * rhs.z;
+    r.z = lhs.x * rhs.y  - lhs.y * rhs.x;
+    r.w = 0.f;
+    return r;
+}
+
+extern float __attribute__((overloadable)) length(float v);
+extern float __attribute__((overloadable)) length(float2 v);
+extern float __attribute__((overloadable)) length(float3 v);
+extern float __attribute__((overloadable)) length(float4 v);
+
+extern float __attribute__((overloadable)) distance(float lhs, float rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float2 lhs, float2 rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float3 lhs, float3 rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float4 lhs, float4 rhs) {
+    return length(lhs - rhs);
+}
+
+extern float __attribute__((overloadable)) normalize(float v) {
+    return 1.f;
+}
+extern float2 __attribute__((overloadable)) normalize(float2 v) {
+    return v / length(v);
+}
+extern float3 __attribute__((overloadable)) normalize(float3 v) {
+    return v / length(v);
+}
+extern float4 __attribute__((overloadable)) normalize(float4 v) {
+    return v / length(v);
+}
+
+extern float __attribute__((overloadable)) half_sqrt(float);
+
+extern float __attribute__((overloadable)) fast_length(float v) {
+    return fabs(v);
+}
+extern float __attribute__((overloadable)) fast_length(float2 v) {
+    return half_sqrt(v.x*v.x + v.y*v.y);
+}
+extern float __attribute__((overloadable)) fast_length(float3 v) {
+    return half_sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float __attribute__((overloadable)) fast_length(float4 v) {
+    return half_sqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
+extern float __attribute__((overloadable)) fast_distance(float lhs, float rhs) {
+    return fast_length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) fast_distance(float2 lhs, float2 rhs) {
+    return fast_length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) fast_distance(float3 lhs, float3 rhs) {
+    return fast_length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) fast_distance(float4 lhs, float4 rhs) {
+    return fast_length(lhs - rhs);
+}
+
+extern float __attribute__((overloadable)) half_rsqrt(float);
+
+extern float __attribute__((overloadable)) fast_normalize(float v) {
+    return 1.f;
+}
+extern float2 __attribute__((overloadable)) fast_normalize(float2 v) {
+    return v * half_rsqrt(v.x*v.x + v.y*v.y);
+}
+extern float3 __attribute__((overloadable)) fast_normalize(float3 v) {
+    return v * half_rsqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float4 __attribute__((overloadable)) fast_normalize(float4 v) {
+    return v * half_rsqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
+extern float __attribute__((overloadable)) half_recip(float);
+
+/*
+extern float __attribute__((overloadable)) approx_atan(float x) {
+    if (x == 0.f)
+        return 0.f;
+    if (x < 0.f)
+        return -1.f * approx_atan(-1.f * x);
+    if (x > 1.f)
+        return M_PI_2 - approx_atan(approx_recip(x));
+    return x * approx_recip(1.f + 0.28f * x*x);
+}
+FN_FUNC_FN(approx_atan)
+*/
+
+typedef union
+{
+  float fv;
+  int32_t iv;
+} ieee_float_shape_type;
+
+/* Get a 32 bit int from a float.  */
+
+#define GET_FLOAT_WORD(i,d)                 \
+do {                                \
+  ieee_float_shape_type gf_u;                   \
+  gf_u.fv = (d);                     \
+  (i) = gf_u.iv;                      \
+} while (0)
+
+/* Set a float from a 32 bit int.  */
+
+#define SET_FLOAT_WORD(d,i)                 \
+do {                                \
+  ieee_float_shape_type sf_u;                   \
+  sf_u.iv = (i);                      \
+  (d) = sf_u.fv;                     \
+} while (0)
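+// The union type-puns between a float and its IEEE-754 bit pattern; for
+// example, GET_FLOAT_WORD(i, 1.0f) yields i == 0x3f800000 (sign 0, biased
+// exponent 127, zero mantissa).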
+
+
+
+// Valid -125 to 125
+extern float __attribute__((overloadable)) native_exp2(float v) {
+    int32_t iv = (int)v;
+    int32_t x = iv + (iv >> 31); // ~floor(v)
+    float r = (v - x);
+
+    float fo;
+    SET_FLOAT_WORD(fo, (x + 127) << 23);
+
+    r *= 0.694f; // ~ ln(2), i.e. log(2) / log(e)
+    float r2 = r*r;
+    float adj = 1.f + r + (r2 * 0.5f) + (r2*r * 0.166666f) + (r2*r2 * 0.0416666f);
+    return fo * adj;
+}
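+// The approximation splits v into x ~ floor(v) and a fractional part r:
+// 2^x is built directly by writing (x + 127) into the exponent bits, and
+// 2^r = e^(r * ln 2) is estimated with a 4th-order Taylor polynomial.  The
+// vector variants below do the same, relying on the compiler treating a
+// same-size vector cast (int2/int4 -> float2/float4) as a bit
+// reinterpretation rather than a per-element conversion.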
+
+extern float2 __attribute__((overloadable)) native_exp2(float2 v) {
+    int2 iv = convert_int2(v);
+    int2 x = iv + (iv >> (int2)31);//floor(v);
+    float2 r = (v - convert_float2(x));
+
+    x += 127;
+
+    float2 fo = (float2)(x << (int2)23);
+
+    r *= 0.694f; // ~ ln(2), i.e. log(2) / log(e)
+    float2 r2 = r*r;
+    float2 adj = 1.f + r + (r2 * 0.5f) + (r2*r * 0.166666f) + (r2*r2 * 0.0416666f);
+    return fo * adj;
+}
+
+extern float4 __attribute__((overloadable)) native_exp2(float4 v) {
+    int4 iv = convert_int4(v);
+    int4 x = iv + (iv >> (int4)31);//floor(v);
+    float4 r = (v - convert_float4(x));
+
+    x += 127;
+
+    float4 fo = (float4)(x << (int4)23);
+
+    r *= 0.694f; // ~ ln(2), i.e. log(2) / log(e)
+    float4 r2 = r*r;
+    float4 adj = 1.f + r + (r2 * 0.5f) + (r2*r * 0.166666f) + (r2*r2 * 0.0416666f);
+    return fo * adj;
+}
+
+extern float3 __attribute__((overloadable)) native_exp2(float3 v) {
+    float4 t = 1.f;
+    t.xyz = v;
+    return native_exp2(t).xyz;
+}
+
+
+extern float __attribute__((overloadable)) native_exp(float v) {
+    return native_exp2(v * 1.442695041f);
+}
+extern float2 __attribute__((overloadable)) native_exp(float2 v) {
+    return native_exp2(v * 1.442695041f);
+}
+extern float3 __attribute__((overloadable)) native_exp(float3 v) {
+    return native_exp2(v * 1.442695041f);
+}
+extern float4 __attribute__((overloadable)) native_exp(float4 v) {
+    return native_exp2(v * 1.442695041f);
+}
+
+extern float __attribute__((overloadable)) native_exp10(float v) {
+    return native_exp2(v * 3.321928095f);
+}
+extern float2 __attribute__((overloadable)) native_exp10(float2 v) {
+    return native_exp2(v * 3.321928095f);
+}
+extern float3 __attribute__((overloadable)) native_exp10(float3 v) {
+    return native_exp2(v * 3.321928095f);
+}
+extern float4 __attribute__((overloadable)) native_exp10(float4 v) {
+    return native_exp2(v * 3.321928095f);
+}
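+// native_exp() and native_exp10() above just rescale into base 2:
+// 1.442695041 ~ log2(e) and 3.321928095 ~ log2(10).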
+
+extern float __attribute__((overloadable)) native_log2(float v) {
+    int32_t ibits;
+    GET_FLOAT_WORD(ibits, v);
+
+    int32_t e = (ibits >> 23) & 0xff;
+
+    ibits &= 0x7fffff;
+    ibits |= 127 << 23;
+
+    float ir;
+    SET_FLOAT_WORD(ir, ibits);
+
+    ir -= 1.5f;
+    float ir2 = ir*ir;
+    float adj2 = 0.405465108f + // -0.00009f +
+                 (0.666666667f * ir) -
+                 (0.222222222f * ir2) +
+                 (0.098765432f * ir*ir2) -
+                 (0.049382716f * ir2*ir2) +
+                 (0.026337449f * ir*ir2*ir2) -
+                 (0.014631916f * ir2*ir2*ir2);
+    adj2 *= (1.f / 0.693147181f);
+
+    return (float)(e - 127) + adj2;
+}
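+// native_log2 pulls the biased exponent e out of the bit pattern, forces the
+// mantissa back into [1, 2) by substituting a bias-127 exponent, and
+// approximates ln(mantissa) with the Taylor series of ln(1.5 + ir) about 1.5
+// (0.405465108 ~ ln 1.5); dividing by ln 2 converts that to log2, giving
+// (e - 127) + log2(mantissa).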
+extern float2 __attribute__((overloadable)) native_log2(float2 v) {
+    float2 v2 = {native_log2(v.x), native_log2(v.y)};
+    return v2;
+}
+extern float3 __attribute__((overloadable)) native_log2(float3 v) {
+    float3 v2 = {native_log2(v.x), native_log2(v.y), native_log2(v.z)};
+    return v2;
+}
+extern float4 __attribute__((overloadable)) native_log2(float4 v) {
+    float4 v2 = {native_log2(v.x), native_log2(v.y), native_log2(v.z), native_log2(v.w)};
+    return v2;
+}
+
+extern float __attribute__((overloadable)) native_log(float v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+extern float2 __attribute__((overloadable)) native_log(float2 v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+extern float3 __attribute__((overloadable)) native_log(float3 v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+extern float4 __attribute__((overloadable)) native_log(float4 v) {
+    return native_log2(v) * (1.f / 1.442695041f);
+}
+
+extern float __attribute__((overloadable)) native_log10(float v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+extern float2 __attribute__((overloadable)) native_log10(float2 v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+extern float3 __attribute__((overloadable)) native_log10(float3 v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+extern float4 __attribute__((overloadable)) native_log10(float4 v) {
+    return native_log2(v) * (1.f / 3.321928095f);
+}
+
+
+extern float __attribute__((overloadable)) native_powr(float v, float y) {
+    float v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
+extern float2 __attribute__((overloadable)) native_powr(float2 v, float2 y) {
+    float2 v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
+extern float3 __attribute__((overloadable)) native_powr(float3 v, float3 y) {
+    float3 v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
+extern float4 __attribute__((overloadable)) native_powr(float4 v, float4 y) {
+    float4 v2 = native_log2(v);
+    v2 = fmax(v2, -125.f);
+    return native_exp2(v2 * y);
+}
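+// native_powr(v, y) is computed as exp2(y * log2(v)); the fmax() clamp pins
+// log2 of very small (or zero) inputs at -125, the bottom of native_exp2's
+// stated valid range.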
+
+
+#undef FN_FUNC_FN
+#undef IN_FUNC_FN
+#undef FN_FUNC_FN_FN
+#undef FN_FUNC_FN_F
+#undef FN_FUNC_FN_IN
+#undef FN_FUNC_FN_I
+#undef FN_FUNC_FN_PFN
+#undef FN_FUNC_FN_PIN
+#undef FN_FUNC_FN_FN_FN
+#undef FN_FUNC_FN_FN_PIN
+#undef XN_FUNC_YN
+#undef UIN_FUNC_IN
+#undef IN_FUNC_IN
+#undef XN_FUNC_XN_XN_BODY
+#undef IN_FUNC_IN_IN_BODY
diff --git a/driver/runtime/rs_core.c b/driver/runtime/rs_core.c
new file mode 100644
index 0000000..54fcccb
--- /dev/null
+++ b/driver/runtime/rs_core.c
@@ -0,0 +1,204 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/* Function declarations from libRS */
+extern float4 __attribute__((overloadable)) convert_float4(uchar4 c);
+
+/* Implementation of Core Runtime */
+
+/*
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+{
+    uchar4 c;
+    c.x = (uchar)(r * 255.f + 0.5f);
+    c.y = (uchar)(g * 255.f + 0.5f);
+    c.z = (uchar)(b * 255.f + 0.5f);
+    c.w = 255;
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+{
+    uchar4 c;
+    c.x = (uchar)(r * 255.f + 0.5f);
+    c.y = (uchar)(g * 255.f + 0.5f);
+    c.z = (uchar)(b * 255.f + 0.5f);
+    c.w = (uchar)(a * 255.f + 0.5f);
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    uchar4 c = {color.x, color.y, color.z, 255};
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    uchar4 c = {color.x, color.y, color.z, color.w};
+    return c;
+}
+*/
+
+extern float4 rsUnpackColor8888(uchar4 c)
+{
+    return convert_float4(c) * 0.003921569f;
+}
+
+
+extern int32_t __attribute__((overloadable)) rsAtomicCas(volatile int32_t *ptr, int32_t expectedValue, int32_t newValue) {
+    return __sync_val_compare_and_swap(ptr, expectedValue, newValue);
+}
+
+extern uint32_t __attribute__((overloadable)) rsAtomicCas(volatile uint32_t *ptr, uint32_t expectedValue, uint32_t newValue) {
+    return __sync_val_compare_and_swap((volatile int32_t *)ptr, (int32_t)expectedValue, (int32_t)newValue);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicInc(volatile int32_t *ptr) {
+    return __sync_fetch_and_add(ptr, 1);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicDec(volatile int32_t *ptr) {
+    return __sync_fetch_and_sub(ptr, 1);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicAdd(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_add(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicSub(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_sub(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicAnd(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_and(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicOr(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_or(ptr, value);
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicXor(volatile int32_t *ptr, int32_t value) {
+    return __sync_fetch_and_xor(ptr, value);
+}
+
+extern uint32_t __attribute__((overloadable)) min(uint32_t, uint32_t);
+extern int32_t __attribute__((overloadable)) min(int32_t, int32_t);
+extern uint32_t __attribute__((overloadable)) max(uint32_t, uint32_t);
+extern int32_t __attribute__((overloadable)) max(int32_t, int32_t);
+
+extern uint32_t __attribute__((overloadable)) rsAtomicMin(volatile uint32_t *ptr, uint32_t value) {
+    uint32_t prev, status;
+    do {
+        prev = *ptr;
+        uint32_t n = min(value, prev);
+        status = rsAtomicCas((volatile int32_t*) ptr, (int32_t) prev, (int32_t)n);
+    } while (status != prev);
+    return prev;
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicMin(volatile int32_t *ptr, int32_t value) {
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        int32_t n = min(value, prev);
+        status = rsAtomicCas(ptr, prev, n);
+    } while (status != prev);
+    return prev;
+}
+
+extern uint32_t __attribute__((overloadable)) rsAtomicMax(volatile uint32_t *ptr, uint32_t value) {
+    uint32_t prev, status;
+    do {
+        prev = *ptr;
+        uint32_t n = max(value, prev);
+        status = rsAtomicCas((volatile int32_t*) ptr, (int32_t) prev, (int32_t) n);
+    } while (status != prev);
+    return prev;
+}
+
+extern int32_t __attribute__((overloadable)) rsAtomicMax(volatile int32_t *ptr, int32_t value) {
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        int32_t n = max(value, prev);
+        status = rsAtomicCas(ptr, prev, n);
+    } while (status != prev);
+    return prev;
+}
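+// rsAtomicMin/rsAtomicMax are built from a compare-and-swap retry loop: read
+// the current value, combine it with the candidate, and attempt the CAS; if
+// another thread raced in, the CAS observes a different value and the loop
+// retries.  Like the other rsAtomic* calls, the value prior to the update is
+// returned.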
+
+
+
+extern int32_t rand();
+#define RAND_MAX 0x7fffffff
+
+
+
+extern float __attribute__((overloadable)) rsRand(float min, float max);/* {
+    float r = (float)rand();
+    r /= RAND_MAX;
+    r = r * (max - min) + min;
+    return r;
+}
+*/
+
+extern float __attribute__((overloadable)) rsRand(float max) {
+    return rsRand(0.f, max);
+    //float r = (float)rand();
+    //r *= max;
+    //r /= RAND_MAX;
+    //return r;
+}
+
+extern int __attribute__((overloadable)) rsRand(int max) {
+    return (int)rsRand((float)max);
+}
+
+extern int __attribute__((overloadable)) rsRand(int min, int max) {
+    return (int)rsRand((float)min, (float)max);
+}
+
+#define PRIM_DEBUG(T)                               \
+extern void __attribute__((overloadable)) rsDebug(const char *, const T *);     \
+void __attribute__((overloadable)) rsDebug(const char *txt, T val) {            \
+    rsDebug(txt, &val);                                                         \
+}
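+// PRIM_DEBUG forwards the by-value vector rsDebug() overloads to the
+// driver-provided variants that take a pointer to the value.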
+
+PRIM_DEBUG(char2)
+PRIM_DEBUG(char3)
+PRIM_DEBUG(char4)
+PRIM_DEBUG(uchar2)
+PRIM_DEBUG(uchar3)
+PRIM_DEBUG(uchar4)
+PRIM_DEBUG(short2)
+PRIM_DEBUG(short3)
+PRIM_DEBUG(short4)
+PRIM_DEBUG(ushort2)
+PRIM_DEBUG(ushort3)
+PRIM_DEBUG(ushort4)
+PRIM_DEBUG(int2)
+PRIM_DEBUG(int3)
+PRIM_DEBUG(int4)
+PRIM_DEBUG(uint2)
+PRIM_DEBUG(uint3)
+PRIM_DEBUG(uint4)
+PRIM_DEBUG(long2)
+PRIM_DEBUG(long3)
+PRIM_DEBUG(long4)
+PRIM_DEBUG(ulong2)
+PRIM_DEBUG(ulong3)
+PRIM_DEBUG(ulong4)
+PRIM_DEBUG(float2)
+PRIM_DEBUG(float3)
+PRIM_DEBUG(float4)
+PRIM_DEBUG(double2)
+PRIM_DEBUG(double3)
+PRIM_DEBUG(double4)
+
+#undef PRIM_DEBUG
+
diff --git a/driver/runtime/rs_element.c b/driver/runtime/rs_element.c
new file mode 100644
index 0000000..4db5883
--- /dev/null
+++ b/driver/runtime/rs_element.c
@@ -0,0 +1,111 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Element
+*/
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementCount(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.fieldsCount;
+}
+
+extern rs_element __attribute__((overloadable))
+        rsElementGetSubElement(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        rs_element nullElem = {0};
+        return nullElem;
+    }
+    rs_element returnElem = {element->mHal.state.fields[index]};
+    return returnElem;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementNameLength(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldNameLengths[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementName(rs_element e, uint32_t index, char *name, uint32_t nameLength) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount ||
+        nameLength == 0 || name == 0) {
+        return 0;
+    }
+
+    uint32_t numToCopy = element->mHal.state.fieldNameLengths[index];
+    if (nameLength < numToCopy) {
+        numToCopy = nameLength;
+    }
+    // Place the null terminator manually, in case the string is truncated
+    numToCopy --;
+    name[numToCopy] = '\0';
+    const char *nameSource = element->mHal.state.fieldNames[index];
+    for (uint32_t i = 0; i < numToCopy; i ++) {
+        name[i] = nameSource[i];
+    }
+    return numToCopy;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementArraySize(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldArraySizes[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementOffsetBytes(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldOffsetBytes[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetBytesSize(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.elementSizeBytes;
+}
+
+extern rs_data_type __attribute__((overloadable))
+        rsElementGetDataType(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return RS_TYPE_INVALID;
+    }
+    return element->mHal.state.dataType;
+}
+
+extern rs_data_kind __attribute__((overloadable))
+        rsElementGetDataKind(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return RS_KIND_INVALID;
+    }
+    return element->mHal.state.dataKind;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetVectorSize(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.vectorSize;
+}
diff --git a/driver/runtime/rs_matrix.c b/driver/runtime/rs_matrix.c
new file mode 100644
index 0000000..3afccc1
--- /dev/null
+++ b/driver/runtime/rs_matrix.c
@@ -0,0 +1,314 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/* Function declarations from libRS */
+extern float4 __attribute__((overloadable)) convert_float4(uchar4 c);
+
+/* Implementation of Core Runtime */
+
+
+/////////////////////////////////////////////////////
+// Matrix ops
+/////////////////////////////////////////////////////
+
+
+extern void __attribute__((overloadable))
+rsMatrixLoadIdentity(rs_matrix4x4 *m) {
+    m->m[0] = 1.f;
+    m->m[1] = 0.f;
+    m->m[2] = 0.f;
+    m->m[3] = 0.f;
+    m->m[4] = 0.f;
+    m->m[5] = 1.f;
+    m->m[6] = 0.f;
+    m->m[7] = 0.f;
+    m->m[8] = 0.f;
+    m->m[9] = 0.f;
+    m->m[10] = 1.f;
+    m->m[11] = 0.f;
+    m->m[12] = 0.f;
+    m->m[13] = 0.f;
+    m->m[14] = 0.f;
+    m->m[15] = 1.f;
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoadIdentity(rs_matrix3x3 *m) {
+    m->m[0] = 1.f;
+    m->m[1] = 0.f;
+    m->m[2] = 0.f;
+    m->m[3] = 0.f;
+    m->m[4] = 1.f;
+    m->m[5] = 0.f;
+    m->m[6] = 0.f;
+    m->m[7] = 0.f;
+    m->m[8] = 1.f;
+}
+extern void __attribute__((overloadable))
+rsMatrixLoadIdentity(rs_matrix2x2 *m) {
+    m->m[0] = 1.f;
+    m->m[1] = 0.f;
+    m->m[2] = 0.f;
+    m->m[3] = 1.f;
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const float *f) {
+    m->m[0] = f[0];
+    m->m[1] = f[1];
+    m->m[2] = f[2];
+    m->m[3] = f[3];
+    m->m[4] = f[4];
+    m->m[5] = f[5];
+    m->m[6] = f[6];
+    m->m[7] = f[7];
+    m->m[8] = f[8];
+    m->m[9] = f[9];
+    m->m[10] = f[10];
+    m->m[11] = f[11];
+    m->m[12] = f[12];
+    m->m[13] = f[13];
+    m->m[14] = f[14];
+    m->m[15] = f[15];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix3x3 *m, const float *f) {
+    m->m[0] = f[0];
+    m->m[1] = f[1];
+    m->m[2] = f[2];
+    m->m[3] = f[3];
+    m->m[4] = f[4];
+    m->m[5] = f[5];
+    m->m[6] = f[6];
+    m->m[7] = f[7];
+    m->m[8] = f[8];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix2x2 *m, const float *f) {
+    m->m[0] = f[0];
+    m->m[1] = f[1];
+    m->m[2] = f[2];
+    m->m[3] = f[3];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix4x4 *s) {
+    m->m[0] = s->m[0];
+    m->m[1] = s->m[1];
+    m->m[2] = s->m[2];
+    m->m[3] = s->m[3];
+    m->m[4] = s->m[4];
+    m->m[5] = s->m[5];
+    m->m[6] = s->m[6];
+    m->m[7] = s->m[7];
+    m->m[8] = s->m[8];
+    m->m[9] = s->m[9];
+    m->m[10] = s->m[10];
+    m->m[11] = s->m[11];
+    m->m[12] = s->m[12];
+    m->m[13] = s->m[13];
+    m->m[14] = s->m[14];
+    m->m[15] = s->m[15];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix3x3 *v) {
+    m->m[0] = v->m[0];
+    m->m[1] = v->m[1];
+    m->m[2] = v->m[2];
+    m->m[3] = 0.f;
+    m->m[4] = v->m[3];
+    m->m[5] = v->m[4];
+    m->m[6] = v->m[5];
+    m->m[7] = 0.f;
+    m->m[8] = v->m[6];
+    m->m[9] = v->m[7];
+    m->m[10] = v->m[8];
+    m->m[11] = 0.f;
+    m->m[12] = 0.f;
+    m->m[13] = 0.f;
+    m->m[14] = 0.f;
+    m->m[15] = 1.f;
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix2x2 *v) {
+    m->m[0] = v->m[0];
+    m->m[1] = v->m[1];
+    m->m[2] = 0.f;
+    m->m[3] = 0.f;
+    m->m[4] = v->m[2];
+    m->m[5] = v->m[3];
+    m->m[6] = 0.f;
+    m->m[7] = 0.f;
+    m->m[8] = 0.f;
+    m->m[9] = 0.f;
+    m->m[10] = 1.f;
+    m->m[11] = 0.f;
+    m->m[12] = 0.f;
+    m->m[13] = 0.f;
+    m->m[14] = 0.f;
+    m->m[15] = 1.f;
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix3x3 *m, const rs_matrix3x3 *s) {
+    m->m[0] = s->m[0];
+    m->m[1] = s->m[1];
+    m->m[2] = s->m[2];
+    m->m[3] = s->m[3];
+    m->m[4] = s->m[4];
+    m->m[5] = s->m[5];
+    m->m[6] = s->m[6];
+    m->m[7] = s->m[7];
+    m->m[8] = s->m[8];
+}
+extern void __attribute__((overloadable))
+rsMatrixLoad(rs_matrix2x2 *m, const rs_matrix2x2 *s) {
+    m->m[0] = s->m[0];
+    m->m[1] = s->m[1];
+    m->m[2] = s->m[2];
+    m->m[3] = s->m[3];
+}
+
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix4x4 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 4 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix4x4 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 4 + col];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix3x3 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 3 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix3x3 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 3 + col];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix2x2 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 2 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix2x2 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 2 + col];
+}
+
+extern float2 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix2x2 *m, float2 in) {
+    float2 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[2] * in.y);
+    ret.y = (m->m[1] * in.x) + (m->m[3] * in.y);
+    return ret;
+}
+extern float2 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix2x2 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix2x2 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float4 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float3 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *m, float3 in) {
+    return rsMatrixMultiply((const rs_matrix3x3 *)m, in);
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix3x3 *)m, in);
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoadMultiply(rs_matrix4x4 *ret, const rs_matrix4x4 *lhs, const rs_matrix4x4 *rhs) {
+    for (int i=0 ; i<4 ; i++) {
+        float ri0 = 0;
+        float ri1 = 0;
+        float ri2 = 0;
+        float ri3 = 0;
+        for (int j=0 ; j<4 ; j++) {
+            const float rhs_ij = rsMatrixGet(rhs, i, j);
+            ri0 += rsMatrixGet(lhs, j, 0) * rhs_ij;
+            ri1 += rsMatrixGet(lhs, j, 1) * rhs_ij;
+            ri2 += rsMatrixGet(lhs, j, 2) * rhs_ij;
+            ri3 += rsMatrixGet(lhs, j, 3) * rhs_ij;
+        }
+        rsMatrixSet(ret, i, 0, ri0);
+        rsMatrixSet(ret, i, 1, ri1);
+        rsMatrixSet(ret, i, 2, ri2);
+        rsMatrixSet(ret, i, 3, ri3);
+    }
+}
+
+extern void __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *lhs, const rs_matrix4x4 *rhs) {
+    rs_matrix4x4 r;
+    rsMatrixLoadMultiply(&r, lhs, rhs);
+    rsMatrixLoad(lhs, &r);
+}
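+// The in-place rsMatrixMultiply() variants (here and for 3x3/2x2 below)
+// accumulate the product into a local temporary and then copy it back, so the
+// left-hand matrix can safely be both an operand and the destination.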
+
+extern void __attribute__((overloadable))
+rsMatrixLoadMultiply(rs_matrix3x3 *ret, const rs_matrix3x3 *lhs, const rs_matrix3x3 *rhs) {
+    for (int i=0 ; i<3 ; i++) {
+        float ri0 = 0;
+        float ri1 = 0;
+        float ri2 = 0;
+        for (int j=0 ; j<3 ; j++) {
+            const float rhs_ij = rsMatrixGet(rhs, i, j);
+            ri0 += rsMatrixGet(lhs, j, 0) * rhs_ij;
+            ri1 += rsMatrixGet(lhs, j, 1) * rhs_ij;
+            ri2 += rsMatrixGet(lhs, j, 2) * rhs_ij;
+        }
+        rsMatrixSet(ret, i, 0, ri0);
+        rsMatrixSet(ret, i, 1, ri1);
+        rsMatrixSet(ret, i, 2, ri2);
+    }
+}
+
+extern void __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *lhs, const rs_matrix3x3 *rhs) {
+    rs_matrix3x3 r;
+    rsMatrixLoadMultiply(&r, lhs, rhs);
+    rsMatrixLoad(lhs, &r);
+}
+
+extern void __attribute__((overloadable))
+rsMatrixLoadMultiply(rs_matrix2x2 *ret, const rs_matrix2x2 *lhs, const rs_matrix2x2 *rhs) {
+    for (int i=0 ; i<2 ; i++) {
+        float ri0 = 0;
+        float ri1 = 0;
+        for (int j=0 ; j<2 ; j++) {
+            const float rhs_ij = rsMatrixGet(rhs, i, j);
+            ri0 += rsMatrixGet(lhs, j, 0) * rhs_ij;
+            ri1 += rsMatrixGet(lhs, j, 1) * rhs_ij;
+        }
+        rsMatrixSet(ret, i, 0, ri0);
+        rsMatrixSet(ret, i, 1, ri1);
+    }
+}
+
+extern void __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix2x2 *lhs, const rs_matrix2x2 *rhs) {
+    rs_matrix2x2 r;
+    rsMatrixLoadMultiply(&r, lhs, rhs);
+    rsMatrixLoad(lhs, &r);
+}
+
diff --git a/driver/runtime/rs_mesh.c b/driver/runtime/rs_mesh.c
new file mode 100644
index 0000000..bb533bc
--- /dev/null
+++ b/driver/runtime/rs_mesh.c
@@ -0,0 +1,55 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Mesh
+*/
+extern uint32_t __attribute__((overloadable))
+        rsgMeshGetVertexAllocationCount(rs_mesh m) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL) {
+        return 0;
+    }
+    return mesh->mHal.state.vertexBuffersCount;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsgMeshGetPrimitiveCount(rs_mesh m) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL) {
+        return 0;
+    }
+    return mesh->mHal.state.primitivesCount;
+}
+
+extern rs_allocation __attribute__((overloadable))
+        rsgMeshGetVertexAllocation(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.vertexBuffersCount) {
+        rs_allocation nullAlloc = {0};
+        return nullAlloc;
+    }
+    rs_allocation returnAlloc = {mesh->mHal.state.vertexBuffers[index]};
+    return returnAlloc;
+}
+
+extern rs_allocation __attribute__((overloadable))
+        rsgMeshGetIndexAllocation(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.primitivesCount) {
+        rs_allocation nullAlloc = {0};
+        return nullAlloc;
+    }
+    rs_allocation returnAlloc = {mesh->mHal.state.indexBuffers[index]};
+    return returnAlloc;
+}
+
+extern rs_primitive __attribute__((overloadable))
+        rsgMeshGetPrimitive(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.primitivesCount) {
+        return RS_PRIMITIVE_INVALID;
+    }
+    return mesh->mHal.state.primitives[index];
+}
diff --git a/driver/runtime/rs_program.c b/driver/runtime/rs_program.c
new file mode 100644
index 0000000..64c656f
--- /dev/null
+++ b/driver/runtime/rs_program.c
@@ -0,0 +1,108 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Program Store
+*/
+extern rs_depth_func __attribute__((overloadable))
+        rsgProgramStoreGetDepthFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_DEPTH_FUNC_INVALID;
+    }
+    return prog->mHal.state.depthFunc;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsDepthMaskEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.depthWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskRedEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorRWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskGreenEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorGWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskBlueEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorBWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskAlphaEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorAWriteEnable;
+}
+
+extern rs_blend_src_func __attribute__((overloadable))
+        rsgProgramStoreGetBlendSrcFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_BLEND_SRC_INVALID;
+    }
+    return prog->mHal.state.blendSrc;
+}
+
+extern rs_blend_dst_func __attribute__((overloadable))
+        rsgProgramStoreGetBlendDstFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_BLEND_DST_INVALID;
+    }
+    return prog->mHal.state.blendDst;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsDitherEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.ditherEnable;
+}
+
+/**
+* Program Raster
+*/
+extern bool __attribute__((overloadable))
+        rsgProgramRasterIsPointSpriteEnabled(rs_program_raster pr) {
+    ProgramRaster_t *prog = (ProgramRaster_t *)pr.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.pointSprite;
+}
+
+extern rs_cull_mode __attribute__((overloadable))
+        rsgProgramRasterGetCullMode(rs_program_raster pr) {
+    ProgramRaster_t *prog = (ProgramRaster_t *)pr.p;
+    if (prog == NULL) {
+        return RS_CULL_INVALID;
+    }
+    return prog->mHal.state.cull;
+}
diff --git a/driver/runtime/rs_sample.c b/driver/runtime/rs_sample.c
new file mode 100644
index 0000000..2cd5bdc
--- /dev/null
+++ b/driver/runtime/rs_sample.c
@@ -0,0 +1,654 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+
+// 565 Conversion bits taken from SkBitmap
+#define SK_R16_BITS     5
+#define SK_G16_BITS     6
+#define SK_B16_BITS     5
+
+#define SK_R16_SHIFT    (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT    (SK_B16_BITS)
+#define SK_B16_SHIFT    0
+
+#define SK_R16_MASK     ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK     ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK     ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color)   (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color)   (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color)   (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+static inline unsigned SkR16ToR32(unsigned r) {
+    return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+    return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+    return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c)      SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c)      SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c)      SkB16ToB32(SkGetPackedB16(c))
+
+static float3 getFrom565(uint16_t color) {
+    float3 result;
+    result.x = (float)SkPacked16ToR32(color);
+    result.y = (float)SkPacked16ToG32(color);
+    result.z = (float)SkPacked16ToB32(color);
+    return result;
+}
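+// Each 5- or 6-bit channel is widened to 8 bits by shifting it up and
+// replicating its top bits into the vacated low bits, so the returned
+// components are already in the 0..255 range.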
+
+/**
+* Allocation sampling
+*/
+static inline float __attribute__((overloadable))
+        getElementAt1(const uint8_t *p, int32_t x) {
+    float r = p[x];
+    return r;
+}
+
+static inline float2 __attribute__((overloadable))
+        getElementAt2(const uint8_t *p, int32_t x) {
+    x *= 2;
+    float2 r = {p[x], p[x+1]};
+    return r;
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt3(const uint8_t *p, int32_t x) {
+    x *= 4;
+    float3 r = {p[x], p[x+1], p[x+2]};
+    return r;
+}
+
+static inline float4 __attribute__((overloadable))
+        getElementAt4(const uint8_t *p, int32_t x) {
+    x *= 4;
+    const uchar4 *p2 = (const uchar4 *)&p[x];
+    return convert_float4(p2[0]);
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt565(const uint8_t *p, int32_t x) {
+    x *= 2;
+    float3 r = getFrom565(((const uint16_t *)(p + x))[0]);
+    return r;
+}
+
+static inline float __attribute__((overloadable))
+        getElementAt1(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    float r = p[x];
+    return r;
+}
+
+static inline float2 __attribute__((overloadable))
+        getElementAt2(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 2;
+    float2 r = {p[x], p[x+1]};
+    return r;
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt3(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 4;
+    float3 r = {p[x], p[x+1], p[x+2]};
+    return r;
+}
+
+static inline float4 __attribute__((overloadable))
+        getElementAt4(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 4;
+    float4 r = {p[x], p[x+1], p[x+2], p[x+3]};
+    return r;
+}
+
+static inline float3 __attribute__((overloadable))
+        getElementAt565(const uint8_t *p, size_t stride, int32_t x, int32_t y) {
+    p += y * stride;
+    x *= 2;
+    float3 r = getFrom565(((const uint16_t *)(p + x))[0]);
+    return r;
+}
+
+
+
+
+
+static float4 __attribute__((overloadable))
+            getSample_A(const uint8_t *p, int32_t iPixel,
+                          int32_t next, float w0, float w1) {
+    float p0 = getElementAt1(p, iPixel);
+    float p1 = getElementAt1(p, next);
+    float r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {0.f, 0.f, 0.f, r};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_L(const uint8_t *p, int32_t iPixel,
+                          int32_t next, float w0, float w1) {
+    float p0 = getElementAt1(p, iPixel);
+    float p1 = getElementAt1(p, next);
+    float r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r, r, r, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_LA(const uint8_t *p, int32_t iPixel,
+                           int32_t next, float w0, float w1) {
+    float2 p0 = getElementAt2(p, iPixel);
+    float2 p1 = getElementAt2(p, next);
+    float2 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.x, r.x, r.y};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGB(const uint8_t *p, int32_t iPixel,
+                            int32_t next, float w0, float w1) {
+    float3 p0 = getElementAt3(p, iPixel);
+    float3 p1 = getElementAt3(p, next);
+    float3 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.y, r.z, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_565(const uint8_t *p, int32_t iPixel,
+                           int32_t next, float w0, float w1) {
+    float3 p0 = getElementAt565(p, iPixel);
+    float3 p1 = getElementAt565(p, next);
+    float3 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.y, r.z, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGBA(const uint8_t *p, int32_t iPixel,
+                             int32_t next, float w0, float w1) {
+    float4 p0 = getElementAt4(p, iPixel);
+    float4 p1 = getElementAt4(p, next);
+    float4 r = p0 * w0 + p1 * w1;
+    r *= (1.f / 255.f);
+    return r;
+}
+
+
+static float4 __attribute__((overloadable))
+            getSample_A(const uint8_t *p, size_t stride,
+                          int locX, int locY, int nextX, int nextY,
+                          float w0, float w1, float w2, float w3) {
+    float p0 = getElementAt1(p, stride, locX, locY);
+    float p1 = getElementAt1(p, stride, nextX, locY);
+    float p2 = getElementAt1(p, stride, locX, nextY);
+    float p3 = getElementAt1(p, stride, nextX, nextY);
+    float r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {0.f, 0.f, 0.f, r};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_L(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float p0 = getElementAt1(p, stride, locX, locY);
+    float p1 = getElementAt1(p, stride, nextX, locY);
+    float p2 = getElementAt1(p, stride, locX, nextY);
+    float p3 = getElementAt1(p, stride, nextX, nextY);
+    float r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {r, r, r, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_LA(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float2 p0 = getElementAt2(p, stride, locX, locY);
+    float2 p1 = getElementAt2(p, stride, nextX, locY);
+    float2 p2 = getElementAt2(p, stride, locX, nextY);
+    float2 p3 = getElementAt2(p, stride, nextX, nextY);
+    float2 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.x, r.x, r.y};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGB(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float4 p0 = getElementAt4(p, stride, locX, locY);
+    float4 p1 = getElementAt4(p, stride, nextX, locY);
+    float4 p2 = getElementAt4(p, stride, locX, nextY);
+    float4 p3 = getElementAt4(p, stride, nextX, nextY);
+    float4 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret = {r.x, r.y, r.z, 1.f};
+    return ret;
+}
+static float4 __attribute__((overloadable))
+            getSample_RGBA(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float4 p0 = getElementAt4(p, stride, locX, locY);
+    float4 p1 = getElementAt4(p, stride, nextX, locY);
+    float4 p2 = getElementAt4(p, stride, locX, nextY);
+    float4 p3 = getElementAt4(p, stride, nextX, nextY);
+    float4 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    return r;
+}
+static float4 __attribute__((overloadable))
+            getSample_565(const uint8_t *p, size_t stride,
+                         int locX, int locY, int nextX, int nextY,
+                         float w0, float w1, float w2, float w3) {
+    float3 p0 = getElementAt565(p, stride, locX, locY);
+    float3 p1 = getElementAt565(p, stride, nextX, locY);
+    float3 p2 = getElementAt565(p, stride, locX, nextY);
+    float3 p3 = getElementAt565(p, stride, nextX, nextY);
+    float3 r = p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3;
+    r *= (1.f / 255.f);
+    float4 ret;
+    ret.rgb = r;
+    ret.w = 1.f;
+    return ret;
+}
+
+static float4 __attribute__((overloadable))
+        getBilinearSample1D(const Allocation_t *alloc, float2 weights,
+                          uint32_t iPixel, uint32_t next,
+                          rs_data_kind dk, rs_data_type dt, uint32_t lod) {
+
+     const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+
+     switch(dk) {
+     case RS_KIND_PIXEL_RGBA:
+         return getSample_RGBA(p, iPixel, next, weights.x, weights.y);
+     case RS_KIND_PIXEL_A:
+         return getSample_A(p, iPixel, next, weights.x, weights.y);
+     case RS_KIND_PIXEL_RGB:
+         if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+             return getSample_565(p, iPixel, next, weights.x, weights.y);
+         }
+         return getSample_RGB(p, iPixel, next, weights.x, weights.y);
+     case RS_KIND_PIXEL_L:
+         return getSample_L(p, iPixel, next, weights.x, weights.y);
+     case RS_KIND_PIXEL_LA:
+         return getSample_LA(p, iPixel, next, weights.x, weights.y);
+
+     default:
+         //__builtin_unreachable();
+         break;
+     }
+
+     //__builtin_unreachable();
+     return 0.f;
+}
+
+static uint32_t wrapI(rs_sampler_value wrap, int32_t coord, int32_t size) {
+    if (wrap == RS_SAMPLER_WRAP) {
+        coord = coord % size;
+        if (coord < 0) {
+            coord += size;
+        }
+    }
+    if (wrap == RS_SAMPLER_MIRRORED_REPEAT) {
+        coord = coord % (size * 2);
+        if (coord < 0) {
+            coord = (size * 2) + coord;
+        }
+        if (coord >= size) {
+            coord = (size * 2) - coord;
+        }
+    }
+    return (uint32_t)max(0, min(coord, size - 1));
+}
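+// wrapI() maps a possibly out-of-range texel index into [0, size):
+// RS_SAMPLER_WRAP wraps modulo size (e.g. wrapI(RS_SAMPLER_WRAP, -1, 8) == 7),
+// RS_SAMPLER_MIRRORED_REPEAT reflects every other period, and anything else
+// falls through to the final clamp.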
+
+static float4 __attribute__((overloadable))
+        getBilinearSample2D(const Allocation_t *alloc, float w0, float w1, float w2, float w3,
+                          int lx, int ly, int nx, int ny,
+                          rs_data_kind dk, rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+    size_t stride = alloc->mHal.drvState.lod[lod].stride;
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        return getSample_RGBA(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_A:
+        return getSample_A(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_LA:
+        return getSample_LA(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            return getSample_565(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+        }
+        return getSample_RGB(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+    case RS_KIND_PIXEL_L:
+        return getSample_L(p, stride, lx, ly, nx, ny, w0, w1, w2, w3);
+
+    default:
+        break;
+    }
+
+    return 0.f;
+}
+
+static float4  __attribute__((overloadable))
+        getNearestSample(const Allocation_t *alloc, uint32_t iPixel, rs_data_kind dk,
+                         rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+
+    float4 result = {0.f, 0.f, 0.f, 255.f};
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        result = getElementAt4(p, iPixel);
+        break;
+    case RS_KIND_PIXEL_A:
+        result.w = getElementAt1(p, iPixel);
+        break;
+    case RS_KIND_PIXEL_LA:
+        result.zw = getElementAt2(p, iPixel);
+        result.xy = result.z;
+        break;
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            result.xyz = getElementAt565(p, iPixel);
+        } else {
+            result.xyz = getElementAt3(p, iPixel);
+        }
+        break;
+    case RS_KIND_PIXEL_L:
+        result.xyz = getElementAt1(p, iPixel);
+        break;
+
+    default:
+        //__builtin_unreachable();
+        break;
+    }
+
+    return result * 0.003921569f;
+}
+
+static float4  __attribute__((overloadable))
+        getNearestSample(const Allocation_t *alloc, uint2 iPixel, rs_data_kind dk,
+                         rs_data_type dt, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+    size_t stride = alloc->mHal.drvState.lod[lod].stride;
+
+    float4 result = {0.f, 0.f, 0.f, 255.f};
+
+    switch(dk) {
+    case RS_KIND_PIXEL_RGBA:
+        result = getElementAt4(p, stride, iPixel.x, iPixel.y);
+        break;
+    case RS_KIND_PIXEL_A:
+        result.w = getElementAt1(p, stride, iPixel.x, iPixel.y);
+        break;
+    case RS_KIND_PIXEL_LA:
+        result.zw = getElementAt2(p, stride, iPixel.x, iPixel.y);
+        result.xy = result.z;
+        break;
+    case RS_KIND_PIXEL_RGB:
+        if (dt == RS_TYPE_UNSIGNED_5_6_5) {
+            result.xyz = getElementAt565(p, stride, iPixel.x, iPixel.y);
+        } else {
+            result.xyz = getElementAt3(p, stride, iPixel.x, iPixel.y);
+        }
+        break;
+
+    default:
+        //__builtin_unreachable();
+        break;
+    }
+
+    return result * 0.003921569f;
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_LinearPixel(const Allocation_t *alloc,
+                               rs_data_kind dk, rs_data_type dt,
+                               rs_sampler_value wrapS,
+                               float uv, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+
+    int32_t sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    float pixelUV = uv * (float)(sourceW);
+    int32_t iPixel = (int32_t)(pixelUV);
+    float frac = pixelUV - (float)iPixel;
+
+    if (frac < 0.5f) {
+        iPixel -= 1;
+        frac += 0.5f;
+    } else {
+        frac -= 0.5f;
+    }
+
+    float oneMinusFrac = 1.0f - frac;
+
+    float2 weights;
+    weights.x = oneMinusFrac;
+    weights.y = frac;
+
+    uint32_t next = wrapI(wrapS, iPixel + 1, sourceW);
+    uint32_t location = wrapI(wrapS, iPixel, sourceW);
+
+    return getBilinearSample1D(alloc, weights, location, next, dk, dt, lod);
+}
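A worked trace of the half-texel adjustment above (illustrative, assuming a 4-texel LOD row, sourceW = 4):

    uv = 0.5  ->  pixelUV = 2.0, iPixel = 2, frac = 0.0
    frac < 0.5, so iPixel becomes 1 and frac becomes 0.5
    weights = (0.5, 0.5), location = 1, next = 2

That is, a sample at the midpoint of the row lands exactly between the centers of texels 1 and 2, which is the texel-center convention the +/- 0.5 shift implements.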
+
+static float4 __attribute__((overloadable))
+        sample_LOD_NearestPixel(const Allocation_t *alloc,
+                                rs_data_kind dk, rs_data_type dt,
+                                rs_sampler_value wrapS,
+                                float uv, uint32_t lod) {
+
+    int32_t sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    int32_t iPixel = (int32_t)(uv * (float)(sourceW));
+    uint32_t location = wrapI(wrapS, iPixel, sourceW);
+
+    return getNearestSample(alloc, location, dk, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_LinearPixel(const Allocation_t *alloc,
+                               rs_data_kind dk, rs_data_type dt,
+                               rs_sampler_value wrapS,
+                               rs_sampler_value wrapT,
+                               float2 uv, uint32_t lod) {
+
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
+
+    int sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    int sourceH = alloc->mHal.drvState.lod[lod].dimY;
+
+    float pixelU = uv.x * sourceW;
+    float pixelV = uv.y * sourceH;
+    int iPixelU = pixelU;
+    int iPixelV = pixelV;
+    float fracU = pixelU - iPixelU;
+    float fracV = pixelV - iPixelV;
+
+    if (fracU < 0.5f) {
+        iPixelU -= 1;
+        fracU += 0.5f;
+    } else {
+        fracU -= 0.5f;
+    }
+    if (fracV < 0.5f) {
+        iPixelV -= 1;
+        fracV += 0.5f;
+    } else {
+        fracV -= 0.5f;
+    }
+    float oneMinusFracU = 1.0f - fracU;
+    float oneMinusFracV = 1.0f - fracV;
+
+    float w0 = oneMinusFracU * oneMinusFracV;
+    float w1 = fracU * oneMinusFracV;
+    float w2 = oneMinusFracU * fracV;
+    float w3 = fracU * fracV;
+
+    int nx = wrapI(wrapS, iPixelU + 1, sourceW);
+    int ny = wrapI(wrapT, iPixelV + 1, sourceH);
+    int lx = wrapI(wrapS, iPixelU, sourceW);
+    int ly = wrapI(wrapT, iPixelV, sourceH);
+
+    return getBilinearSample2D(alloc, w0, w1, w2, w3, lx, ly, nx, ny, dk, dt, lod);
+
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_NearestPixel(const Allocation_t *alloc,
+                                rs_data_kind dk, rs_data_type dt,
+                                rs_sampler_value wrapS,
+                                rs_sampler_value wrapT,
+                                float2 uv, uint32_t lod) {
+    int sourceW = alloc->mHal.drvState.lod[lod].dimX;
+    int sourceH = alloc->mHal.drvState.lod[lod].dimY;
+
+    float2 dimF;
+    dimF.x = (float)(sourceW);
+    dimF.y = (float)(sourceH);
+    int2 iPixel = convert_int2(uv * dimF);
+
+    uint2 location;
+    location.x = wrapI(wrapS, iPixel.x, sourceW);
+    location.y = wrapI(wrapT, iPixel.y, sourceH);
+    return getNearestSample(alloc, location, dk, dt, lod);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float uv, float lod) {
+
+    const Allocation_t *alloc = (const Allocation_t *)a.p;
+    const Sampler_t *prog = (Sampler_t *)s.p;
+    const Type_t *type = (Type_t *)alloc->mHal.state.type;
+    const Element_t *elem = type->mHal.state.element;
+    rs_data_kind dk = elem->mHal.state.dataKind;
+    rs_data_type dt = elem->mHal.state.dataType;
+    rs_sampler_value sampleMin = prog->mHal.state.minFilter;
+    rs_sampler_value sampleMag = prog->mHal.state.magFilter;
+    rs_sampler_value wrapS = prog->mHal.state.wrapS;
+
+    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE)) {
+        return 0.f;
+    }
+
+    if (lod <= 0.0f) {
+        if (sampleMag == RS_SAMPLER_NEAREST) {
+            return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, uv, 0);
+        }
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, 0);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_NEAREST) {
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod = min(lod, (float)maxLOD);
+        uint32_t nearestLOD = (uint32_t)round(lod);
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, nearestLOD);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_LINEAR) {
+        uint32_t lod0 = (uint32_t)floor(lod);
+        uint32_t lod1 = (uint32_t)ceil(lod);
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod0 = min(lod0, maxLOD);
+        lod1 = min(lod1, maxLOD);
+        float4 sample0 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, lod0);
+        float4 sample1 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, uv, lod1);
+        float frac = lod - (float)lod0;
+        return sample0 * (1.0f - frac) + sample1 * frac;
+    }
+
+    return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, uv, 0);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float location) {
+    return rsSample(a, s, location, 0);
+}
+
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float2 uv, float lod) {
+
+    const Allocation_t *alloc = (const Allocation_t *)a.p;
+    const Sampler_t *prog = (Sampler_t *)s.p;
+    const Type_t *type = (Type_t *)alloc->mHal.state.type;
+    const Element_t *elem = type->mHal.state.element;
+    rs_data_kind dk = elem->mHal.state.dataKind;
+    rs_data_type dt = elem->mHal.state.dataType;
+    rs_sampler_value sampleMin = prog->mHal.state.minFilter;
+    rs_sampler_value sampleMag = prog->mHal.state.magFilter;
+    rs_sampler_value wrapS = prog->mHal.state.wrapS;
+    rs_sampler_value wrapT = prog->mHal.state.wrapT;
+
+    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE)) {
+        return 0.f;
+    }
+
+    if (lod <= 0.0f) {
+        if (sampleMag == RS_SAMPLER_NEAREST) {
+            return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+        }
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_NEAREST) {
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod = min(lod, (float)maxLOD);
+        uint32_t nearestLOD = (uint32_t)round(lod);
+        return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, nearestLOD);
+    }
+
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_LINEAR) {
+        uint32_t lod0 = (uint32_t)floor(lod);
+        uint32_t lod1 = (uint32_t)ceil(lod);
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;
+        lod0 = min(lod0, maxLOD);
+        lod1 = min(lod1, maxLOD);
+        float4 sample0 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, lod0);
+        float4 sample1 = sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, lod1);
+        float frac = lod - (float)lod0;
+        return sample0 * (1.0f - frac) + sample1 * frac;
+    }
+
+    return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float2 uv) {
+
+    const Allocation_t *alloc = (const Allocation_t *)a.p;
+    const Sampler_t *prog = (Sampler_t *)s.p;
+    const Type_t *type = (Type_t *)alloc->mHal.state.type;
+    const Element_t *elem = type->mHal.state.element;
+    rs_data_kind dk = elem->mHal.state.dataKind;
+    rs_data_type dt = elem->mHal.state.dataType;
+    rs_sampler_value wrapS = prog->mHal.state.wrapS;
+    rs_sampler_value wrapT = prog->mHal.state.wrapT;
+
+    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE)) {
+        return 0.f;
+    }
+
+    if (prog->mHal.state.magFilter == RS_SAMPLER_NEAREST) {
+        return sample_LOD_NearestPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+    }
+    return sample_LOD_LinearPixel(alloc, dk, dt, wrapS, wrapT, uv, 0);
+}
+
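For context, a minimal sketch of how these overloads could be driven from script code; the globals gTex and gSamp, the package name, and the 512x512 launch size are assumptions, and gTex must have been created with RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE (and mipmaps, for the LOD paths) or the guards above return 0:

    #pragma version(1)
    #pragma rs java_package_name(com.example.sampletest)
    #include "rs_graphics.rsh"

    rs_allocation gTex;   // assumed: texture allocation with mipmaps
    rs_sampler gSamp;     // assumed: e.g. LINEAR_MIP_LINEAR min filter, WRAP wrap modes

    uchar4 __attribute__((kernel)) sampleTex(uint32_t x, uint32_t y) {
        float2 uv = {x / 511.f, y / 511.f};          // normalized coords for a 512x512 launch
        float4 c = rsSample(gTex, gSamp, uv, 1.5f);  // lod 1.5 blends mip levels 1 and 2
        return rsPackColorTo8888(c);
    }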
diff --git a/driver/runtime/rs_sampler.c b/driver/runtime/rs_sampler.c
new file mode 100644
index 0000000..39782de
--- /dev/null
+++ b/driver/runtime/rs_sampler.c
@@ -0,0 +1,51 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+ * Sampler
+ */
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetMinification(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.minFilter;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetMagnification(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.magFilter;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetWrapS(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.wrapS;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetWrapT(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.wrapT;
+}
+
+extern float __attribute__((overloadable))
+        rsSamplerGetAnisotropy(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return 0.0f;
+    }
+    return prog->mHal.state.aniso;
+}
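A minimal usage sketch for these accessors; the helper name is illustrative:

    // Illustrative only: fall back to the linear path when the sampler is
    // null (the accessors above return RS_SAMPLER_INVALID in that case).
    static bool magIsNearest(rs_sampler s) {
        rs_sampler_value mag = rsSamplerGetMagnification(s);
        return mag == RS_SAMPLER_NEAREST;
    }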
diff --git a/driver/runtime/rs_structs.h b/driver/runtime/rs_structs.h
new file mode 100644
index 0000000..6db4279
--- /dev/null
+++ b/driver/runtime/rs_structs.h
@@ -0,0 +1,262 @@
+#ifndef _RS_STRUCTS_H_
+#define _RS_STRUCTS_H_
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Allocation owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsAllocation.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsAllocationGetDimX(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * allocations.
+ *
+ *****************************************************************************/
+typedef enum {
+    RS_ALLOCATION_MIPMAP_NONE = 0,
+    RS_ALLOCATION_MIPMAP_FULL = 1,
+    RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE = 2
+} rs_allocation_mipmap_control;
+
+typedef struct Allocation {
+    char __pad[32];
+    struct {
+        void * drv;
+        struct {
+            const void *type;
+            uint32_t usageFlags;
+            rs_allocation_mipmap_control mipmapControl;
+            uint32_t yuv;
+            uint32_t elementSizeBytes;
+            bool hasMipmaps;
+            bool hasFaces;
+            bool hasReferences;
+            void * usrPtr;
+            int32_t surfaceTextureID;
+            void * wndSurface;
+            void * surfaceTexture;
+        } state;
+
+        struct DrvState {
+            struct LodState {
+                void * mallocPtr;
+                size_t stride;
+                uint32_t dimX;
+                uint32_t dimY;
+                uint32_t dimZ;
+            } lod[16/*android::renderscript::Allocation::MAX_LOD*/];
+            size_t faceOffset;
+            uint32_t lodCount;
+            uint32_t faceCount;
+        } drvState;
+    } mHal;
+} Allocation_t;
+
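As a sketch of what the mirrored layout buys (illustrative; the helper name is an assumption, not an existing API), a dimension query can be inlined down to a couple of loads instead of a call back into librs, exactly as the sampling code above does with drvState.lod[lod].dimX:

    static uint32_t allocDimX(rs_allocation a) {
        const Allocation_t *alloc = (const Allocation_t *)a.p;
        return alloc->mHal.drvState.lod[0].dimX;
    }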
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class ProgramStore owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsProgramStore.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgProgramStoreGetDepthFunc(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * program store.
+ *
+ *****************************************************************************/
+typedef struct ProgramStore {
+    char __pad[40];
+    struct {
+        struct {
+            bool ditherEnable;
+            bool colorRWriteEnable;
+            bool colorGWriteEnable;
+            bool colorBWriteEnable;
+            bool colorAWriteEnable;
+            rs_blend_src_func blendSrc;
+            rs_blend_dst_func blendDst;
+            bool depthWriteEnable;
+            rs_depth_func depthFunc;
+        } state;
+    } mHal;
+} ProgramStore_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class ProgramRaster owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsProgramRaster.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgProgramRasterGetCullMode(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * program raster.
+ *
+ *****************************************************************************/
+typedef struct ProgramRaster {
+    char __pad[36];
+    struct {
+        void * drv;
+        struct {
+            bool pointSprite;
+            rs_cull_mode cull;
+        } state;
+    } mHal;
+} ProgramRaster_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Sampler owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsSampler.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsSamplerGetMagnification(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * samplers.
+ *
+ *****************************************************************************/
+typedef struct Sampler {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            rs_sampler_value magFilter;
+            rs_sampler_value minFilter;
+            rs_sampler_value wrapS;
+            rs_sampler_value wrapT;
+            rs_sampler_value wrapR;
+            float aniso;
+        } state;
+    } mHal;
+} Sampler_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Element owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsElement.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsElementGetSubElementCount(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * elements.
+ *
+ *****************************************************************************/
+typedef struct Element {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            rs_data_type dataType;
+            rs_data_kind dataKind;
+            uint32_t vectorSize;
+            uint32_t elementSizeBytes;
+
+            // Subelements
+            const void **fields;
+            uint32_t *fieldArraySizes;
+            const char **fieldNames;
+            uint32_t *fieldNameLengths;
+            uint32_t *fieldOffsetBytes;
+            uint32_t fieldsCount;
+        } state;
+    } mHal;
+} Element_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Type owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsType.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsAllocationGetElement(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * types.
+ *
+ *****************************************************************************/
+typedef struct Type {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            const void * element;
+            uint32_t dimX;
+            uint32_t dimY;
+            uint32_t dimZ;
+            uint32_t *lodDimX;
+            uint32_t *lodDimY;
+            uint32_t *lodDimZ;
+            uint32_t *lodOffset;
+            uint32_t lodCount;
+            bool faces;
+        } state;
+    } mHal;
+} Type_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Mesh owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsMesh.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsMeshGetVertexAllocationCount(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * meshes.
+ *
+ *****************************************************************************/
+typedef struct Mesh {
+    char __pad[32];
+    struct {
+        void *drv;
+        struct {
+            void **vertexBuffers;
+            uint32_t vertexBuffersCount;
+
+            // indexBuffers[i] could be NULL, in which case only primitives[i] is used
+            void **indexBuffers;
+            uint32_t indexBuffersCount;
+            rs_primitive *primitives;
+            uint32_t primitivesCount;
+        } state;
+    } mHal;
+} Mesh_t;
+#endif // _RS_STRUCTS_H_
diff --git a/driver/linkloader/tests/images/clean-testcases.sh b/java/tests/GenImages/Android.mk
old mode 100755
new mode 100644
similarity index 63%
rename from driver/linkloader/tests/images/clean-testcases.sh
rename to java/tests/GenImages/Android.mk
index 1c2d0be..69ef52c
--- a/driver/linkloader/tests/images/clean-testcases.sh
+++ b/java/tests/GenImages/Android.mk
@@ -1,7 +1,5 @@
-#!/bin/bash -e
-
-
-# Copyright (C) 2011 The Android Open Source Project
+#
+# Copyright (C) 2013 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,5 +12,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+#
 
-rm -rf *.o *.bc
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := $(call all-java-files-under, src) \
+                   $(call all-renderscript-files-under, src)
+
+LOCAL_PACKAGE_NAME := RsGenImages
+
+include $(BUILD_PACKAGE)
diff --git a/java/tests/GenImages/AndroidManifest.xml b/java/tests/GenImages/AndroidManifest.xml
new file mode 100644
index 0000000..c7b690e
--- /dev/null
+++ b/java/tests/GenImages/AndroidManifest.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2013 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="com.android.rs.genimage"
+    android:versionCode="1"
+    android:versionName="1.0" >
+    <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
+
+    <uses-sdk android:minSdkVersion="14" />
+    <!-- Tell the system this app requires OpenGL ES 2.0. -->
+    <uses-feature android:glEsVersion="0x00020000" android:required="true" />
+
+    <application
+        android:icon="@drawable/ic_launcher"
+        android:label="@string/app_name" >
+        <activity
+            android:name="com.android.rs.genimage.GenImageAct"
+            android:label="@string/app_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.intent.category.LAUNCHER" />
+            </intent-filter>
+        </activity>
+    </application>
+
+</manifest>
diff --git a/java/tests/GenImages/res/drawable-nodpi/test_pattern.png b/java/tests/GenImages/res/drawable-nodpi/test_pattern.png
new file mode 100644
index 0000000..e7d1455
--- /dev/null
+++ b/java/tests/GenImages/res/drawable-nodpi/test_pattern.png
Binary files differ
diff --git a/java/tests/GenImages/res/drawable/ic_launcher.png b/java/tests/GenImages/res/drawable/ic_launcher.png
new file mode 100644
index 0000000..359047d
--- /dev/null
+++ b/java/tests/GenImages/res/drawable/ic_launcher.png
Binary files differ
diff --git a/java/tests/GenImages/res/layout/main.xml b/java/tests/GenImages/res/layout/main.xml
new file mode 100644
index 0000000..b8db6d8
--- /dev/null
+++ b/java/tests/GenImages/res/layout/main.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2013 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="fill_parent"
+    android:layout_height="fill_parent"
+    android:orientation="vertical" >
+
+    <TextView
+        android:layout_width="fill_parent"
+        android:layout_height="wrap_content"
+        android:text="@string/hello" />
+
+</LinearLayout>
diff --git a/java/tests/GenImages/res/values/strings.xml b/java/tests/GenImages/res/values/strings.xml
new file mode 100644
index 0000000..a0cc805
--- /dev/null
+++ b/java/tests/GenImages/res/values/strings.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2013 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<resources>
+
+    <string name="hello">Hello</string>
+    <string name="app_name">RS Image Generator</string>
+
+</resources>
diff --git a/java/tests/GenImages/src/com/android/rs/genimage/GenImage.java b/java/tests/GenImages/src/com/android/rs/genimage/GenImage.java
new file mode 100644
index 0000000..f238095
--- /dev/null
+++ b/java/tests/GenImages/src/com/android/rs/genimage/GenImage.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.rs.genimage;
+
+import android.content.Context;
+
+import java.io.FileOutputStream;
+import java.nio.ByteBuffer;
+import java.nio.IntBuffer;
+import java.nio.ByteOrder;
+import java.nio.FloatBuffer;
+
+import android.graphics.Bitmap;
+import android.graphics.BitmapFactory;
+
+import javax.microedition.khronos.egl.EGLConfig;
+import javax.microedition.khronos.opengles.GL10;
+import android.opengl.GLUtils;
+
+import android.opengl.GLES20;
+import android.opengl.GLSurfaceView;
+
+public class GenImage implements GLSurfaceView.Renderer {
+    private Bitmap mTestImage;
+
+    private Triangle mTriangle;
+
+
+    private Bitmap loadBitmap(Context context, int resource) {
+        final BitmapFactory.Options options = new BitmapFactory.Options();
+        options.inPreferredConfig = Bitmap.Config.ARGB_8888;
+        return BitmapFactory.decodeResource(context.getResources(), resource, options);
+    }
+
+    GenImage(Context context) {
+
+        mTestImage = loadBitmap(context, R.drawable.test_pattern);
+
+    }
+
+    @Override
+    public void onSurfaceCreated(GL10 unused, EGLConfig config) {
+
+        // Set the background frame color
+        GLES20.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+
+        mTriangle = new Triangle(mTestImage);
+    }
+
+    @Override
+    public void onDrawFrame(GL10 unused) {
+
+        // Draw background color
+        GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+
+        // Draw triangle
+        mTriangle.draw();
+    }
+
+    @Override
+    public void onSurfaceChanged(GL10 unused, int width, int height) {
+        // Use a fixed 512x512 viewport regardless of the surface size, so the
+        // glReadPixels() capture in Triangle.draw() always reads a 512x512 image.
+        GLES20.glViewport(0, 0, 512, 512);
+    }
+
+    public static int loadShader(int type, String shaderCode){
+
+        // create a vertex shader type (GLES20.GL_VERTEX_SHADER)
+        // or a fragment shader type (GLES20.GL_FRAGMENT_SHADER)
+        int shader = GLES20.glCreateShader(type);
+
+        // add the source code to the shader and compile it
+        GLES20.glShaderSource(shader, shaderCode);
+        GLES20.glCompileShader(shader);
+
+        return shader;
+    }
+
+}
+
+
+
+class Triangle {
+    int mTextureIDs[] = new int[1];
+
+    private final String vertexShaderCode =
+        "varying vec2 vTex0;" +
+        "varying vec2 vPos0;" +
+        "attribute vec4 aPosition;" +
+        "void main() {" +
+        "  gl_Position = aPosition;" +
+        "  vPos0 = aPosition.xy;" +
+        "  vTex0 = ((aPosition.xy + 1.0) * 0.6);" +
+        //"  vTex0 = (aPosition.xy * 1.7) + 0.5;" +
+        "}";
+
+    private final String fragmentShaderCode =
+        "precision mediump float;" +
+        "varying vec2 vTex0;" +
+        "varying vec2 vPos0;" +
+        "uniform sampler2D uSamp;" +
+        "void main() {" +
+        "  vec2 tc = vTex0;" +
+        //"  tc.x *= pow(vPos0.y + 1.0, 2.0);" +
+        //"  tc.y *= pow(vPos0.x + 1.0, 2.0);" +
+        "  vec4 c = texture2D(uSamp, tc);" +
+        "  c.a = 1.0;" +
+        "  gl_FragColor = c;" +
+        "}";
+
+    private final FloatBuffer vertexBuffer;
+    private final int mProgram;
+
+    // Two full-viewport triangles, three coordinates (x, y, z) per vertex.
+    static float triangleCoords[] = { // in counterclockwise order:
+       -1.0f,  1.0f, 0.0f,   // top left
+       -1.0f, -1.0f, 0.0f,   // bottom left
+        1.0f, -1.0f, 0.0f,   // bottom right
+
+       -1.0f,  1.0f, 0.0f,   // top left
+        1.0f, -1.0f, 0.0f,   // bottom right
+        1.0f,  1.0f, 0.0f    // top right
+    };
+
+    FloatBuffer createFloatBuffer(float buf[]) {
+        ByteBuffer bb = ByteBuffer.allocateDirect(buf.length * 4);
+        bb.order(ByteOrder.nativeOrder());
+        FloatBuffer fb = bb.asFloatBuffer();
+        fb.put(buf);
+        fb.position(0);
+        return fb;
+    }
+
+    public String setup(int key) {
+        String s = new String();
+        int tmp;
+
+        tmp = key % 2;
+        key /= 2;
+        if (tmp != 0) {
+            s += "N";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST);
+        } else {
+            s += "L";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
+        }
+
+        tmp = key % 2;
+        key /= 2;
+        if (tmp != 0) {
+            s += "N";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_NEAREST);
+        } else {
+            s += "L";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
+        }
+
+        tmp = key % 3;
+        key /= 3;
+        switch(tmp) {
+        case 0:
+            s += "_CE";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
+            break;
+        case 1:
+            s += "_RE";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_REPEAT);
+            break;
+        case 2:
+            s += "_MR";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_MIRRORED_REPEAT);
+            break;
+        }
+
+        tmp = key % 3;
+        key /= 3;
+        switch(tmp) {
+        case 0:
+            s += "_CE";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
+            break;
+        case 1:
+            s += "_RE";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_REPEAT);
+            break;
+        case 2:
+            s += "_MR";
+            GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_MIRRORED_REPEAT);
+            break;
+        }
+
+        if (key > 0) done = true;
+        return s;
+    }
+
+    public Triangle(Bitmap testImage) {
+        vertexBuffer = createFloatBuffer(triangleCoords);
+
+        // prepare shaders and OpenGL program
+        int vertexShader = GenImage.loadShader(GLES20.GL_VERTEX_SHADER, vertexShaderCode);
+        int fragmentShader = GenImage.loadShader(GLES20.GL_FRAGMENT_SHADER, fragmentShaderCode);
+
+        mProgram = GLES20.glCreateProgram();             // create empty OpenGL Program
+        GLES20.glAttachShader(mProgram, vertexShader);   // add the vertex shader to program
+        GLES20.glAttachShader(mProgram, fragmentShader); // add the fragment shader to program
+        GLES20.glLinkProgram(mProgram);                  // create OpenGL program executables
+
+        GLES20.glGenTextures(1, mTextureIDs, 0);
+
+        // Bind to the texture in OpenGL
+        GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mTextureIDs[0]);
+        GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, testImage, 0);
+    }
+
+    boolean done = false;
+    int key = 0;
+
+    public void draw() {
+        GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+        String ext = setup(key++);
+
+        // Add program to OpenGL environment
+        GLES20.glUseProgram(mProgram);
+
+        int posA = GLES20.glGetAttribLocation(mProgram, "aPosition");
+        GLES20.glEnableVertexAttribArray(posA);
+        GLES20.glVertexAttribPointer(posA, 3, GLES20.GL_FLOAT, false, 12, vertexBuffer);
+
+        int sampUni = GLES20.glGetUniformLocation(mProgram, "uSamp");
+        GLES20.glUniform1i(sampUni, 0);
+
+        // Draw the triangle
+        GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0, triangleCoords.length / 3);
+
+        if (!done) {
+            IntBuffer ib = IntBuffer.allocate(512*512);
+            ib.position(0);
+            GLES20.glReadPixels(0,0, 512, 512, GLES20.GL_RGBA,
+                                GLES20.GL_UNSIGNED_BYTE, ib);
+
+            Bitmap bmp = Bitmap.createBitmap(512, 512, Bitmap.Config.ARGB_8888);
+            bmp.setPixels(ib.array(), 0, 512, 0, 0, 512, 512);
+
+            try {
+                String s = new String("/sdcard/imgs/RsSampImg_");
+                s += ext + ".png";
+                FileOutputStream out = new FileOutputStream(s);
+                bmp.compress(Bitmap.CompressFormat.PNG, 95, out);
+                out.close();
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+            bmp.recycle();
+        }
+    }
+}
diff --git a/java/tests/GenImages/src/com/android/rs/genimage/GenImageAct.java b/java/tests/GenImages/src/com/android/rs/genimage/GenImageAct.java
new file mode 100644
index 0000000..ea60d59
--- /dev/null
+++ b/java/tests/GenImages/src/com/android/rs/genimage/GenImageAct.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.rs.genimage;
+
+import android.app.Activity;
+import android.content.Context;
+import android.opengl.GLSurfaceView;
+import android.os.Bundle;
+
+public class GenImageAct extends Activity {
+
+    private GLSurfaceView mGLView;
+
+    @Override
+    public void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+
+        // Create a GLSurfaceView instance and set it
+        // as the ContentView for this Activity
+        mGLView = new MyGLSurfaceView(this);
+        setContentView(mGLView);
+    }
+
+    @Override
+    protected void onPause() {
+        super.onPause();
+        // The following call pauses the rendering thread.
+        // If your OpenGL application is memory intensive,
+        // you should consider de-allocating objects that
+        // consume significant memory here.
+        mGLView.onPause();
+    }
+
+    @Override
+    protected void onResume() {
+        super.onResume();
+        // The following call resumes a paused rendering thread.
+        // If you de-allocated graphic objects for onPause()
+        // this is a good place to re-allocate them.
+        mGLView.onResume();
+    }
+}
+
+class MyGLSurfaceView extends GLSurfaceView {
+
+    public MyGLSurfaceView(Context context) {
+        super(context);
+
+        // Create an OpenGL ES 2.0 context.
+        setEGLContextClientVersion(2);
+
+        // Set the Renderer for drawing on the GLSurfaceView
+        setRenderer(new GenImage(context));
+
+        // Render the view only when there is a change in the drawing data
+        setRenderMode(GLSurfaceView.RENDERMODE_CONTINUOUSLY);
+    }
+}
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/Histogram.java b/java/tests/ImageProcessing/src/com/android/rs/image/Histogram.java
new file mode 100644
index 0000000..edba274
--- /dev/null
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/Histogram.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.rs.image;
+
+import java.lang.Math;
+
+import android.renderscript.*;
+import android.util.Log;
+
+public class Histogram extends TestBase {
+    private ScriptC_histogram mScript;
+    private Allocation mSum;
+    private Allocation mSums;
+
+    public void createTest(android.content.res.Resources res) {
+        mScript = new ScriptC_histogram(mRS);
+
+        int w = mInPixelsAllocation.getType().getX();
+        int h = mInPixelsAllocation.getType().getY();
+        int step = 8;
+        int steps = (h + step - 1) / step;
+
+        mScript.set_gWidth(w);
+        mScript.set_gHeight(h);
+        mScript.set_gStep(step);
+        mScript.set_gSteps(steps);
+
+        Type.Builder tb = new Type.Builder(mRS, Element.I32(mRS));
+        tb.setX(256).setY(steps);
+        Type t = tb.create();
+        mSums = Allocation.createTyped(mRS, t);
+        mSum = Allocation.createSized(mRS, Element.I32(mRS), 256);
+
+        mScript.set_gSums(mSums);
+        mScript.set_gSum(mSum);
+        mScript.set_gSrc(mInPixelsAllocation);
+        mScript.set_gDest(mOutPixelsAllocation);
+
+        mScript.forEach_clear(mOutPixelsAllocation);
+    }
+
+
+
+    public void runTest() {
+        Script.LaunchOptions lo = new Script.LaunchOptions();
+        lo.setX(0, 1);
+        mScript.forEach_pass1(mSums, lo);
+        mScript.forEach_pass2(mSum);
+
+        mScript.invoke_rescale();
+
+        lo.setX(0, 1024);
+        mScript.forEach_draw(mOutPixelsAllocation, lo);
+    }
+
+}
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java b/java/tests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java
index 975027a..0769362 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java
@@ -98,7 +98,8 @@
         WHITE_BALANCE ("White Balance"),
         COLOR_CUBE ("Color Cube"),
         COLOR_CUBE_3D_INTRINSIC ("Color Cube (3D LUT intrinsic)"),
-        USAGE_IO ("Usage io)");
+        USAGE_IO ("Usage io"),
+        HISTOGRAM ("Histogram");
 
 
         private final String name;
@@ -359,6 +360,9 @@
         case USAGE_IO:
             mTest = new UsageIO();
             break;
+        case HISTOGRAM:
+            mTest = new Histogram();
+            break;
         }
 
         mTest.createBaseTest(this, mBitmapIn, mBitmapIn2, mBitmapOut);
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/LevelsV4.java b/java/tests/ImageProcessing/src/com/android/rs/image/LevelsV4.java
index 9eb5647..98c6460 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/LevelsV4.java
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/LevelsV4.java
@@ -109,7 +109,7 @@
     public boolean onBar4Setup(SeekBar b, TextView t) {
         b.setMax(128);
         b.setProgress(128);
-        t.setText("Out White");
+        t.setText("In White");
         return true;
     }
     public boolean onBar5Setup(SeekBar b, TextView t) {
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/WhiteBalance.java b/java/tests/ImageProcessing/src/com/android/rs/image/WhiteBalance.java
index a836067..e8078c2 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/WhiteBalance.java
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/WhiteBalance.java
@@ -25,13 +25,14 @@
 
     public void createTest(android.content.res.Resources res) {
         mScript = new ScriptC_wbalance(mRS);
-    }
 
-    public void runTest() {
         mScript.set_histogramSource(mInPixelsAllocation);
         mScript.set_histogramWidth(mInPixelsAllocation.getType().getX());
         mScript.set_histogramHeight(mInPixelsAllocation.getType().getY());
         mScript.invoke_prepareWhiteBalance();
+    }
+
+    public void runTest() {
         mScript.forEach_whiteBalanceKernel(mInPixelsAllocation, mOutPixelsAllocation);
     }
 
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/colorcube.rs b/java/tests/ImageProcessing/src/com/android/rs/image/colorcube.rs
index 4f1e73e..c0d6ace 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/colorcube.rs
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/colorcube.rs
@@ -56,16 +56,16 @@
     uint4 v011 = convert_uint4(rsGetElementAt_uchar4(gCube, coord1.x, coord2.y, coord2.z));
     uint4 v111 = convert_uint4(rsGetElementAt_uchar4(gCube, coord2.x, coord2.y, coord2.z));
 
-    uint4 yz00 = ((v000 * weight1.x) + (v100 * weight2.x)) >> (int4)8;
-    uint4 yz10 = ((v010 * weight1.x) + (v110 * weight2.x)) >> (int4)8;
-    uint4 yz01 = ((v001 * weight1.x) + (v101 * weight2.x)) >> (int4)8;
-    uint4 yz11 = ((v011 * weight1.x) + (v111 * weight2.x)) >> (int4)8;
+    uint4 yz00 = ((v000 * weight1.x) + (v100 * weight2.x)) >> (uint4)8;
+    uint4 yz10 = ((v010 * weight1.x) + (v110 * weight2.x)) >> (uint4)8;
+    uint4 yz01 = ((v001 * weight1.x) + (v101 * weight2.x)) >> (uint4)8;
+    uint4 yz11 = ((v011 * weight1.x) + (v111 * weight2.x)) >> (uint4)8;
 
-    uint4 z0 = ((yz00 * weight1.y) + (yz10 * weight2.y)) >> (int4)16;
-    uint4 z1 = ((yz01 * weight1.y) + (yz11 * weight2.y)) >> (int4)16;
+    uint4 z0 = ((yz00 * weight1.y) + (yz10 * weight2.y)) >> (uint4)16;
+    uint4 z1 = ((yz01 * weight1.y) + (yz11 * weight2.y)) >> (uint4)16;
 
-    uint4 v = ((z0 * weight1.z) + (z1 * weight2.z)) >> (int4)16;
-    uint4 v2 = (v + 0x7f) >> (int4)8;
+    uint4 v = ((z0 * weight1.z) + (z1 * weight2.z)) >> (uint4)16;
+    uint4 v2 = (v + 0x7f) >> (uint4)8;
 
     *out = convert_uchar4(v2);
     out->a = 0xff;
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/contrast.rs b/java/tests/ImageProcessing/src/com/android/rs/image/contrast.rs
index 5b67252..ef6fd63 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/contrast.rs
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/contrast.rs
@@ -27,12 +27,6 @@
 
 void contrast(const uchar4 *in, uchar4 *out)
 {
-#if 0
-    out->r = rsClamp((int)(brightM * in->r + brightC), 0, 255);
-    out->g = rsClamp((int)(brightM * in->g + brightC), 0, 255);
-    out->b = rsClamp((int)(brightM * in->b + brightC), 0, 255);
-#else
     float3 v = convert_float3(in->rgb) * brightM + brightC;
     out->rgb = convert_uchar3(clamp(v, 0.f, 255.f));
-#endif
 }
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/exposure.rs b/java/tests/ImageProcessing/src/com/android/rs/image/exposure.rs
index d9a8f62..88bb1d6 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/exposure.rs
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/exposure.rs
@@ -23,10 +23,11 @@
     bright = 255.f / (255.f - v);
 }
 
-void exposure(const uchar4 *in, uchar4 *out)
+uchar4 __attribute__((kernel)) exposure(uchar4 in)
 {
-    out->r = rsClamp((int)(bright * in->r), 0, 255);
-    out->g = rsClamp((int)(bright * in->g), 0, 255);
-    out->b = rsClamp((int)(bright * in->b), 0, 255);
+    uchar4 out = {0, 0, 0, 255};
+    float3 t = convert_float3(in.rgb);
+    out.rgb = convert_uchar3(clamp(convert_int3(t * bright), 0, 255));
+    return out;
 }
 
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/histogram.rs b/java/tests/ImageProcessing/src/com/android/rs/image/histogram.rs
new file mode 100644
index 0000000..dc0ec59
--- /dev/null
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/histogram.rs
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ip.rsh"
+
+rs_allocation gSrc;
+rs_allocation gDest;
+rs_allocation gSums;
+rs_allocation gSum;
+
+int gWidth;
+int gHeight;
+int gStep;
+int gSteps;
+
+void __attribute__((kernel)) pass1(int in, uint x, uint y) {
+    for (int i=0; i < (256); i++) {
+        rsSetElementAt_int(gSums, 0, i, y);
+    }
+
+    for (int i = 0; i < gStep; i++) {
+        int py = y*gStep + i;
+        if (py >= gHeight) return;
+
+        for (int px=0; px < gWidth; px++) {
+            uchar4 c = rsGetElementAt_uchar4(gSrc, px, py);
+            int lum = (77 * c.r + 150 * c.g + 29 * c.b) >> 8;
+
+            int old = rsGetElementAt_int(gSums, lum, y);
+            rsSetElementAt_int(gSums, old+1, lum, y);
+        }
+    }
+}
+
+int __attribute__((kernel)) pass2(uint x) {
+    int sum = 0;
+    for (int i=0; i < gSteps; i++) {
+        sum += rsGetElementAt_int(gSums, x, i);
+    }
+    return sum;
+}
+
+void rescale() {
+    int maxv = 0;
+
+    for (int i=0; i < 256; i++) {
+        maxv = max(maxv, rsGetElementAt_int(gSum, i));
+    }
+    float overMax = (1.f / maxv) * gHeight;
+
+    for (int i=0; i < 256; i++) {
+        int t = rsGetElementAt_int(gSum, i);
+        t = gHeight - (overMax * rsGetElementAt_int(gSum, i));
+        t = max(0, t);
+        rsSetElementAt_int(gSum, t, i);
+    }
+}
+
+static const uchar4 gClear = {0, 0, 0, 0xff};
+
+uchar4 __attribute__((kernel)) clear() {
+    return gClear;
+}
+
+uchar4 __attribute__((kernel)) draw(uint x, uint y) {
+    int l = rsGetElementAt_int(gSum, x >> 2);
+    if (y > l) {
+        return 0xff;
+    }
+    return gClear;
+}
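Worked numbers for the two-pass reduction above, assuming a hypothetical 1024x768 input:

    step  = 8
    steps = (768 + 8 - 1) / 8 = 96     // gSums is 256 x 96
    pass1 : one launch per row of gSums; each folds 8 image rows into its 256 bins
    pass2 : per bin, sums the 96 partial rows into gSum (256 entries)
    draw  : launched 1024 wide; bin index x >> 2 maps 1024 columns onto the 256 bins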
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/vibrance.rs b/java/tests/ImageProcessing/src/com/android/rs/image/vibrance.rs
index 174c2c3..b82e1d3 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/vibrance.rs
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/vibrance.rs
@@ -23,11 +23,6 @@
 static const float Gf = 0.587f;
 static const float Bf = 0.114f;
 
-static float S  = 0.f;
-static float MS = 0.f;
-static float Rt = 0.f;
-static float Gt = 0.f;
-static float Bt = 0.f;
 static float Vib = 0.f;
 
 void vibranceKernel(const uchar4 *in, uchar4 *out) {
@@ -37,14 +32,13 @@
     int r = in->r;
     int g = in->g;
     int b = in->b;
-    float red = (r-max(g, b))/256.f;
-    float sx = (float)(Vib/(1+native_exp(-red*3)));
-    S = sx+1;
-    MS = 1.0f - S;
-    Rt = Rf * MS;
-    Gt = Gf * MS;
-    Bt = Bf * MS;
-    int t = (r + g) / 2;
+    float red = (r-max(g, b)) * (1.f / 256.f);
+    float S = (float)(Vib/(1+native_exp(-red*3)))+1;
+    float MS = 1.0f - S;
+    float Rt = Rf * MS;
+    float Gt = Gf * MS;
+    float Bt = Bf * MS;
+    int t = (r + g) >> 1;
     R = r;
     G = g;
     B = b;
@@ -60,12 +54,5 @@
 }
 
 void prepareVibrance() {
-
     Vib = vibrance/100.f;
-    S  = Vib + 1;
-    MS = 1.0f - S;
-    Rt = Rf * MS;
-    Gt = Gf * MS;
-    Bt = Bf * MS;
-
 }
diff --git a/java/tests/ImageProcessing/src/com/android/rs/image/wbalance.rs b/java/tests/ImageProcessing/src/com/android/rs/image/wbalance.rs
index b5ab14f..fbdd869 100644
--- a/java/tests/ImageProcessing/src/com/android/rs/image/wbalance.rs
+++ b/java/tests/ImageProcessing/src/com/android/rs/image/wbalance.rs
@@ -23,9 +23,7 @@
 uint32_t histogramHeight;
 uint32_t histogramWidth;
 
-static float scaleR;
-static float scaleG;
-static float scaleB;
+static float3 scale;
 
 static uchar4 estimateWhite() {
 
@@ -115,28 +113,18 @@
     int maximum = max(estimation.r, max(estimation.g, estimation.b));
     float avg = (minimum + maximum) / 2.f;
 
-    scaleR =  avg/estimation.r;
-    scaleG =  avg/estimation.g;
-    scaleB =  avg/estimation.b;
-
+    scale.r =  avg / estimation.r;
+    scale.g =  avg / estimation.g;
+    scale.b =  avg / estimation.b;
 }
 
-static unsigned char contrastClamp(int c)
-{
-    int N = 255;
-    c &= ~(c >> 31);
-    c -= N;
-    c &= (c >> 31);
-    c += N;
-    return  (unsigned char) c;
-}
+uchar4 __attribute__((kernel)) whiteBalanceKernel(uchar4 in) {
+    float3 t = convert_float3(in.rgb);
+    t *= scale;
+    t = min(t, 255.f);
 
-void whiteBalanceKernel(const uchar4 *in, uchar4 *out) {
-    float Rc =  in->r*scaleR;
-    float Gc =  in->g*scaleG;
-    float Bc =  in->b*scaleB;
-
-    out->r = contrastClamp(Rc);
-    out->g = contrastClamp(Gc);
-    out->b = contrastClamp(Bc);
+    uchar4 out;
+    out.rgb = convert_uchar3(t);
+    out.a = 255;
+    return out;
 }
diff --git a/java/tests/ImageProcessing2/src/com/android/rs/image/colorcube.rs b/java/tests/ImageProcessing2/src/com/android/rs/image/colorcube.rs
index 4f1e73e..c0d6ace 100644
--- a/java/tests/ImageProcessing2/src/com/android/rs/image/colorcube.rs
+++ b/java/tests/ImageProcessing2/src/com/android/rs/image/colorcube.rs
@@ -56,16 +56,16 @@
     uint4 v011 = convert_uint4(rsGetElementAt_uchar4(gCube, coord1.x, coord2.y, coord2.z));
     uint4 v111 = convert_uint4(rsGetElementAt_uchar4(gCube, coord2.x, coord2.y, coord2.z));
 
-    uint4 yz00 = ((v000 * weight1.x) + (v100 * weight2.x)) >> (int4)8;
-    uint4 yz10 = ((v010 * weight1.x) + (v110 * weight2.x)) >> (int4)8;
-    uint4 yz01 = ((v001 * weight1.x) + (v101 * weight2.x)) >> (int4)8;
-    uint4 yz11 = ((v011 * weight1.x) + (v111 * weight2.x)) >> (int4)8;
+    uint4 yz00 = ((v000 * weight1.x) + (v100 * weight2.x)) >> (uint4)8;
+    uint4 yz10 = ((v010 * weight1.x) + (v110 * weight2.x)) >> (uint4)8;
+    uint4 yz01 = ((v001 * weight1.x) + (v101 * weight2.x)) >> (uint4)8;
+    uint4 yz11 = ((v011 * weight1.x) + (v111 * weight2.x)) >> (uint4)8;
 
-    uint4 z0 = ((yz00 * weight1.y) + (yz10 * weight2.y)) >> (int4)16;
-    uint4 z1 = ((yz01 * weight1.y) + (yz11 * weight2.y)) >> (int4)16;
+    uint4 z0 = ((yz00 * weight1.y) + (yz10 * weight2.y)) >> (uint4)16;
+    uint4 z1 = ((yz01 * weight1.y) + (yz11 * weight2.y)) >> (uint4)16;
 
-    uint4 v = ((z0 * weight1.z) + (z1 * weight2.z)) >> (int4)16;
-    uint4 v2 = (v + 0x7f) >> (int4)8;
+    uint4 v = ((z0 * weight1.z) + (z1 * weight2.z)) >> (uint4)16;
+    uint4 v2 = (v + 0x7f) >> (uint4)8;
 
     *out = convert_uchar4(v2);
     out->a = 0xff;
diff --git a/java/tests/LivePreview/src/com/android/rs/livepreview/CameraPreviewActivity.java b/java/tests/LivePreview/src/com/android/rs/livepreview/CameraPreviewActivity.java
index 62dcaa8..be0af4b 100644
--- a/java/tests/LivePreview/src/com/android/rs/livepreview/CameraPreviewActivity.java
+++ b/java/tests/LivePreview/src/com/android/rs/livepreview/CameraPreviewActivity.java
@@ -228,11 +228,14 @@
 
 
         // Set initial values
+        //
+        int initialSize = mPreviewSizes.size() - 1;
 
-        mNextPreviewSize = mPreviewSizes.get(15);
-        mResolutionSpinner.setSelection(15);
+        mNextPreviewSize = mPreviewSizes.get(initialSize);
+        mResolutionSpinner.setSelection(initialSize);
 
-        if (mPreviewTexture != null) {
+        if (mPreviewTexture != null) {
             startPreview();
         }
     }
@@ -371,4 +374,4 @@
 
 
 
-}
\ No newline at end of file
+}
diff --git a/rsContext.cpp b/rsContext.cpp
index bb2808e..2ec58c3 100644
--- a/rsContext.cpp
+++ b/rsContext.cpp
@@ -33,7 +33,7 @@
 #include <string.h>
 #include <dlfcn.h>
 
-#ifndef RS_SERVER
+#if !defined(RS_SERVER) && defined(HAVE_ANDROID_OS)
 #include <cutils/properties.h>
 #endif
 
@@ -204,7 +204,7 @@
 #endif
 
 static uint32_t getProp(const char *str) {
-#ifndef RS_SERVER
+#if !defined(RS_SERVER) && defined(HAVE_ANDROID_OS)
     char buf[PROPERTY_VALUE_MAX];
     property_get(str, buf, "0");
     return atoi(buf);
diff --git a/rsContext.h b/rsContext.h
index abc9b27..4162b64 100644
--- a/rsContext.h
+++ b/rsContext.h
@@ -26,6 +26,11 @@
 #include "rsScriptGroup.h"
 #include "rsSampler.h"
 
+#ifndef RS_SERVER
+#define ATRACE_TAG ATRACE_TAG_RS
+#include "utils/Trace.h"
+#endif
+
 #ifndef RS_COMPATIBILITY_LIB
 #include "rsFont.h"
 #include "rsPath.h"
@@ -34,6 +39,7 @@
 #include "rsProgramRaster.h"
 #include "rsProgramVertex.h"
 #include "rsFBOCache.h"
+
 #endif
 
 
@@ -62,6 +68,8 @@
 #define CHECK_OBJ_OR_NULL(o)
 #endif
 
+
+
 class Context {
 public:
     struct Hal {
diff --git a/rsCppUtils.h b/rsCppUtils.h
index abae7d8..f16325f 100644
--- a/rsCppUtils.h
+++ b/rsCppUtils.h
@@ -34,6 +34,9 @@
 
 #ifdef RS_SERVER
 
+#define ATRACE_TAG
+#define ATRACE_CALL(...)
+
 #include <string>
 #include <vector>
 #include <algorithm>
diff --git a/rsFont.cpp b/rsFont.cpp
index 3665a3d..958ecb5 100644
--- a/rsFont.cpp
+++ b/rsFont.cpp
@@ -20,7 +20,9 @@
 #include "rsFont.h"
 #include "rsProgramFragment.h"
 #include "rsMesh.h"
+#ifdef HAVE_ANDROID_OS
 #include <cutils/properties.h>
+#endif
 
 #ifndef ANDROID_RS_SERIALIZE
 #include <ft2build.h>
@@ -337,27 +339,31 @@
     mLibrary = NULL;
 #endif //ANDROID_RS_SERIALIZE
 
+    float gamma = DEFAULT_TEXT_GAMMA;
+    int32_t blackThreshold = DEFAULT_TEXT_BLACK_GAMMA_THRESHOLD;
+    int32_t whiteThreshold = DEFAULT_TEXT_WHITE_GAMMA_THRESHOLD;
+
+#ifdef HAVE_ANDROID_OS
     // Get the renderer properties
     char property[PROPERTY_VALUE_MAX];
 
     // Get the gamma
-    float gamma = DEFAULT_TEXT_GAMMA;
     if (property_get(PROPERTY_TEXT_GAMMA, property, NULL) > 0) {
         gamma = atof(property);
     }
 
     // Get the black gamma threshold
-    int32_t blackThreshold = DEFAULT_TEXT_BLACK_GAMMA_THRESHOLD;
     if (property_get(PROPERTY_TEXT_BLACK_GAMMA_THRESHOLD, property, NULL) > 0) {
         blackThreshold = atoi(property);
     }
-    mBlackThreshold = (float)(blackThreshold) / 255.0f;
 
     // Get the white gamma threshold
-    int32_t whiteThreshold = DEFAULT_TEXT_WHITE_GAMMA_THRESHOLD;
     if (property_get(PROPERTY_TEXT_WHITE_GAMMA_THRESHOLD, property, NULL) > 0) {
         whiteThreshold = atoi(property);
     }
+#endif
+
+    mBlackThreshold = (float)(blackThreshold) / 255.0f;
     mWhiteThreshold = (float)(whiteThreshold) / 255.0f;
 
     // Compute the gamma tables
diff --git a/rsScriptC.cpp b/rsScriptC.cpp
index 56c9f06..3d9cd11 100644
--- a/rsScriptC.cpp
+++ b/rsScriptC.cpp
@@ -168,6 +168,8 @@
                          size_t usrBytes,
                          const RsScriptCall *sc) {
 
+    ATRACE_CALL();
+
     Context::PushState ps(rsc);
 
     setupGLState(rsc);
@@ -176,6 +178,8 @@
 }
 
 void ScriptC::Invoke(Context *rsc, uint32_t slot, const void *data, size_t len) {
+    ATRACE_CALL();
+
     if (slot >= mHal.info.exportedFunctionCount) {
         rsc->setError(RS_ERROR_BAD_SCRIPT, "Calling invoke on bad script");
         return;
@@ -230,7 +234,7 @@
                           const char *cacheDir,
                           const uint8_t *bitcode,
                           size_t bitcodeLen) {
-
+    ATRACE_CALL();
     //ALOGE("runCompiler %p %p %p %p %p %i", rsc, this, resName, cacheDir, bitcode, bitcodeLen);
 #ifndef RS_COMPATIBILITY_LIB
 #ifndef ANDROID_RS_SERIALIZE
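
The rsScriptC.cpp hunks drop ATRACE_CALL() markers into runForEach, Invoke and runCompiler, while rsCppUtils.h defines ATRACE_TAG and ATRACE_CALL away for RS_SERVER builds so the same call sites compile everywhere; Android builds get the working macros from utils/Trace.h via rsContext.h. A minimal sketch of that stub-or-trace arrangement (the function below is hypothetical, and the stub bodies mirror the RS_SERVER definitions rather than the real Trace.h ones):

/* Fallback no-op tracing macros, matching the RS_SERVER stubs above. */
#ifndef ATRACE_CALL
#define ATRACE_TAG
#define ATRACE_CALL(...)
#endif

static void launch_kernel(void) {
    ATRACE_CALL();   /* expands to nothing when tracing is stubbed out */
    /* ... actual launch work would go here ... */
}
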
diff --git a/scriptc/rs_cl.rsh b/scriptc/rs_cl.rsh
index 788aea8..6dc971c 100644
--- a/scriptc/rs_cl.rsh
+++ b/scriptc/rs_cl.rsh
@@ -25,11 +25,11 @@
 
 // Conversions
 #define CVT_FUNC_2(typeout, typein)                             \
-_RS_RUNTIME typeout##2 __attribute__((overloadable))            \
+_RS_RUNTIME typeout##2 __attribute__((const, overloadable))     \
         convert_##typeout##2(typein##2 v);                      \
-_RS_RUNTIME typeout##3 __attribute__((overloadable))            \
+_RS_RUNTIME typeout##3 __attribute__((const, overloadable))     \
         convert_##typeout##3(typein##3 v);                      \
-_RS_RUNTIME typeout##4 __attribute__((overloadable))            \
+_RS_RUNTIME typeout##4 __attribute__((const, overloadable))     \
         convert_##typeout##4(typein##4 v);
 
 
@@ -92,89 +92,89 @@
 
 // Float ops, 6.11.2
 
-#define FN_FUNC_FN(fnc)                                         \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v); \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v); \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v);
+#define FN_FUNC_FN(fnc)                                                \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) fnc(float2 v); \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) fnc(float3 v); \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) fnc(float4 v);
 
-#define F_FUNC_FN(fnc)                                          \
-_RS_RUNTIME float __attribute__((overloadable)) fnc(float2 v);  \
-_RS_RUNTIME float __attribute__((overloadable)) fnc(float3 v);  \
-_RS_RUNTIME float __attribute__((overloadable)) fnc(float4 v);
+#define F_FUNC_FN(fnc)                                                \
+_RS_RUNTIME float __attribute__((const, overloadable)) fnc(float2 v); \
+_RS_RUNTIME float __attribute__((const, overloadable)) fnc(float3 v); \
+_RS_RUNTIME float __attribute__((const, overloadable)) fnc(float4 v);
 
-#define IN_FUNC_FN(fnc)                                         \
-_RS_RUNTIME int2 __attribute__((overloadable)) fnc(float2 v);   \
-_RS_RUNTIME int3 __attribute__((overloadable)) fnc(float3 v);   \
-_RS_RUNTIME int4 __attribute__((overloadable)) fnc(float4 v);
+#define IN_FUNC_FN(fnc)                                              \
+_RS_RUNTIME int2 __attribute__((const, overloadable)) fnc(float2 v); \
+_RS_RUNTIME int3 __attribute__((const, overloadable)) fnc(float3 v); \
+_RS_RUNTIME int4 __attribute__((const, overloadable)) fnc(float4 v);
 
-#define FN_FUNC_FN_FN(fnc)                                                  \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, float2 v2); \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, float3 v2); \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, float4 v2);
+#define FN_FUNC_FN_FN(fnc)                                                         \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) fnc(float2 v1, float2 v2); \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) fnc(float3 v1, float3 v2); \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) fnc(float4 v1, float4 v2);
 
-#define F_FUNC_FN_FN(fnc)                                                   \
-_RS_RUNTIME float __attribute__((overloadable)) fnc(float2 v1, float2 v2);  \
-_RS_RUNTIME float __attribute__((overloadable)) fnc(float3 v1, float3 v2);  \
-_RS_RUNTIME float __attribute__((overloadable)) fnc(float4 v1, float4 v2);
+#define F_FUNC_FN_FN(fnc)                                                         \
+_RS_RUNTIME float __attribute__((const, overloadable)) fnc(float2 v1, float2 v2); \
+_RS_RUNTIME float __attribute__((const, overloadable)) fnc(float3 v1, float3 v2); \
+_RS_RUNTIME float __attribute__((const, overloadable)) fnc(float4 v1, float4 v2);
 
-#define FN_FUNC_FN_F(fnc)                                                   \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, float v2);  \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, float v2);  \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, float v2);
+#define FN_FUNC_FN_F(fnc)                                                         \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) fnc(float2 v1, float v2); \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) fnc(float3 v1, float v2); \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) fnc(float4 v1, float v2);
 
-#define FN_FUNC_FN_IN(fnc)                                                  \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int2 v2);   \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int3 v2);   \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int4 v2);   \
+#define FN_FUNC_FN_IN(fnc)                                                       \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) fnc(float2 v1, int2 v2); \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) fnc(float3 v1, int3 v2); \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) fnc(float4 v1, int4 v2);
 
-#define FN_FUNC_FN_I(fnc)                                                   \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int v2);    \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int v2);    \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int v2);
+#define FN_FUNC_FN_I(fnc)                                                       \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) fnc(float2 v1, int v2); \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) fnc(float3 v1, int v2); \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) fnc(float4 v1, int v2);
 
-#define FN_FUNC_FN_PFN(fnc)                         \
-_RS_RUNTIME float2 __attribute__((overloadable))    \
-        fnc(float2 v1, float2 *v2);                 \
-_RS_RUNTIME float3 __attribute__((overloadable))    \
-        fnc(float3 v1, float3 *v2);                 \
-_RS_RUNTIME float4 __attribute__((overloadable))    \
+#define FN_FUNC_FN_PFN(fnc)                            \
+_RS_RUNTIME float2 __attribute__((pure, overloadable)) \
+        fnc(float2 v1, float2 *v2);                    \
+_RS_RUNTIME float3 __attribute__((pure, overloadable)) \
+        fnc(float3 v1, float3 *v2);                    \
+_RS_RUNTIME float4 __attribute__((pure, overloadable)) \
         fnc(float4 v1, float4 *v2);
 
-#define FN_FUNC_FN_PIN(fnc)                                                 \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int2 *v2);  \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int3 *v2);  \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int4 *v2);
+#define FN_FUNC_FN_PIN(fnc)                                                      \
+_RS_RUNTIME float2 __attribute__((pure, overloadable)) fnc(float2 v1, int2 *v2); \
+_RS_RUNTIME float3 __attribute__((pure, overloadable)) fnc(float3 v1, int3 *v2); \
+_RS_RUNTIME float4 __attribute__((pure, overloadable)) fnc(float4 v1, int4 *v2);
 
-#define FN_FUNC_FN_FN_FN(fnc)                       \
-_RS_RUNTIME float2 __attribute__((overloadable))    \
-        fnc(float2 v1, float2 v2, float2 v3);       \
-_RS_RUNTIME float3 __attribute__((overloadable))    \
-        fnc(float3 v1, float3 v2, float3 v3);       \
-_RS_RUNTIME float4 __attribute__((overloadable))    \
+#define FN_FUNC_FN_FN_FN(fnc)                           \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) \
+        fnc(float2 v1, float2 v2, float2 v3);           \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) \
+        fnc(float3 v1, float3 v2, float3 v3);           \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) \
         fnc(float4 v1, float4 v2, float4 v3);
 
-#define FN_FUNC_FN_FN_F(fnc)                        \
-_RS_RUNTIME float2 __attribute__((overloadable))    \
-        fnc(float2 v1, float2 v2, float v3);        \
-_RS_RUNTIME float3 __attribute__((overloadable))    \
-        fnc(float3 v1, float3 v2, float v3);        \
-_RS_RUNTIME float4 __attribute__((overloadable))    \
+#define FN_FUNC_FN_FN_F(fnc)                            \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) \
+        fnc(float2 v1, float2 v2, float v3);            \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) \
+        fnc(float3 v1, float3 v2, float v3);            \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) \
         fnc(float4 v1, float4 v2, float v3);
 
-#define FN_FUNC_FN_F_F(fnc)                         \
-_RS_RUNTIME float2 __attribute__((overloadable))    \
-        fnc(float2 v1, float v2, float v3);         \
-_RS_RUNTIME float3 __attribute__((overloadable))    \
-        fnc(float3 v1, float v2, float v3);         \
-_RS_RUNTIME float4 __attribute__((overloadable))    \
+#define FN_FUNC_FN_F_F(fnc)                             \
+_RS_RUNTIME float2 __attribute__((const, overloadable)) \
+        fnc(float2 v1, float v2, float v3);             \
+_RS_RUNTIME float3 __attribute__((const, overloadable)) \
+        fnc(float3 v1, float v2, float v3);             \
+_RS_RUNTIME float4 __attribute__((const, overloadable)) \
         fnc(float4 v1, float v2, float v3);
 
-#define FN_FUNC_FN_FN_PIN(fnc)                      \
-_RS_RUNTIME float2 __attribute__((overloadable))    \
-        fnc(float2 v1, float2 v2, int2 *v3);        \
-_RS_RUNTIME float3 __attribute__((overloadable))    \
-        fnc(float3 v1, float3 v2, int3 *v3);        \
-_RS_RUNTIME float4 __attribute__((overloadable))    \
+#define FN_FUNC_FN_FN_PIN(fnc)                         \
+_RS_RUNTIME float2 __attribute__((pure, overloadable)) \
+        fnc(float2 v1, float2 v2, int2 *v3);           \
+_RS_RUNTIME float3 __attribute__((pure, overloadable)) \
+        fnc(float3 v1, float3 v2, int3 *v3);           \
+_RS_RUNTIME float4 __attribute__((pure, overloadable)) \
         fnc(float4 v1, float4 v2, int4 *v3);
 
 
@@ -183,7 +183,7 @@
  *
  * Supports float, float2, float3, float4
  */
-extern float __attribute__((overloadable)) acos(float);
+extern float __attribute__((const, overloadable)) acos(float);
 FN_FUNC_FN(acos)
 
 /**
@@ -191,7 +191,7 @@
  *
  * Supports float, float2, float3, float4
  */
-extern float __attribute__((overloadable)) acosh(float);
+extern float __attribute__((const, overloadable)) acosh(float);
 FN_FUNC_FN(acosh)
 
 /**
@@ -199,7 +199,7 @@
  *
  * Supports float, float2, float3, float4
  */
-_RS_RUNTIME float __attribute__((overloadable)) acospi(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) acospi(float v);
 FN_FUNC_FN(acospi)
 
 /**
@@ -207,7 +207,7 @@
  *
  * Supports float, float2, float3, float4
  */
-extern float __attribute__((overloadable)) asin(float);
+extern float __attribute__((const, overloadable)) asin(float);
 FN_FUNC_FN(asin)
 
 /**
@@ -215,7 +215,7 @@
  *
  * Supports float, float2, float3, float4
  */
-extern float __attribute__((overloadable)) asinh(float);
+extern float __attribute__((const, overloadable)) asinh(float);
 FN_FUNC_FN(asinh)
 
 
@@ -224,7 +224,7 @@
  *
  * Supports float, float2, float3, float4
  */
-_RS_RUNTIME float __attribute__((overloadable)) asinpi(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) asinpi(float v);
 FN_FUNC_FN(asinpi)
 
 /**
@@ -232,7 +232,7 @@
  *
  * Supports float, float2, float3, float4
  */
-extern float __attribute__((overloadable)) atan(float);
+extern float __attribute__((const, overloadable)) atan(float);
 FN_FUNC_FN(atan)
 
 /**
@@ -244,7 +244,7 @@
  * @param y
  * @param x
  */
-extern float __attribute__((overloadable)) atan2(float y, float x);
+extern float __attribute__((const, overloadable)) atan2(float y, float x);
 FN_FUNC_FN_FN(atan2)
 
 /**
@@ -252,7 +252,7 @@
  *
  * Supports float, float2, float3, float4
  */
-extern float __attribute__((overloadable)) atanh(float);
+extern float __attribute__((const, overloadable)) atanh(float);
 FN_FUNC_FN(atanh)
 
 /**
@@ -260,7 +260,7 @@
  *
  * Supports float, float2, float3, float4
  */
-_RS_RUNTIME float __attribute__((overloadable)) atanpi(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) atanpi(float v);
 FN_FUNC_FN(atanpi)
 
 /**
@@ -272,7 +272,7 @@
  * @param y
  * @param x
  */
-_RS_RUNTIME float __attribute__((overloadable)) atan2pi(float y, float x);
+_RS_RUNTIME float __attribute__((const, overloadable)) atan2pi(float y, float x);
 FN_FUNC_FN_FN(atan2pi)
 
 
@@ -281,7 +281,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) cbrt(float);
+extern float __attribute__((const, overloadable)) cbrt(float);
 FN_FUNC_FN(cbrt)
 
 /**
@@ -289,7 +289,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) ceil(float);
+extern float __attribute__((const, overloadable)) ceil(float);
 FN_FUNC_FN(ceil)
 
 /**
@@ -301,7 +301,7 @@
  * @param x
  * @param y
  */
-extern float __attribute__((overloadable)) copysign(float x, float y);
+extern float __attribute__((const, overloadable)) copysign(float x, float y);
 FN_FUNC_FN_FN(copysign)
 
 /**
@@ -309,7 +309,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) cos(float);
+extern float __attribute__((const, overloadable)) cos(float);
 FN_FUNC_FN(cos)
 
 /**
@@ -317,7 +317,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) cosh(float);
+extern float __attribute__((const, overloadable)) cosh(float);
 FN_FUNC_FN(cosh)
 
 /**
@@ -325,7 +325,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-_RS_RUNTIME float __attribute__((overloadable)) cospi(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) cospi(float v);
 FN_FUNC_FN(cospi)
 
 /**
@@ -333,7 +333,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) erfc(float);
+extern float __attribute__((const, overloadable)) erfc(float);
 FN_FUNC_FN(erfc)
 
 /**
@@ -341,7 +341,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) erf(float);
+extern float __attribute__((const, overloadable)) erf(float);
 FN_FUNC_FN(erf)
 
 /**
@@ -349,7 +349,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) exp(float);
+extern float __attribute__((const, overloadable)) exp(float);
 FN_FUNC_FN(exp)
 
 /**
@@ -357,7 +357,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) exp2(float);
+extern float __attribute__((const, overloadable)) exp2(float);
 FN_FUNC_FN(exp2)
 
 /**
@@ -366,7 +366,7 @@
  * Supports float, float2, float3, float4. Both arguments must be of the same
  * type.
  */
-extern float __attribute__((overloadable)) pow(float x, float y);
+extern float __attribute__((const, overloadable)) pow(float x, float y);
 FN_FUNC_FN_FN(pow)
 
 /**
@@ -374,7 +374,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-_RS_RUNTIME float __attribute__((overloadable)) exp10(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) exp10(float v);
 FN_FUNC_FN(exp10)
 
 /**
@@ -382,7 +382,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) expm1(float);
+extern float __attribute__((const, overloadable)) expm1(float);
 FN_FUNC_FN(expm1)
 
 /**
@@ -390,7 +390,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) fabs(float);
+extern float __attribute__((const, overloadable)) fabs(float);
 FN_FUNC_FN(fabs)
 
 /**
@@ -399,7 +399,7 @@
  * Supports float, float2, float3, float4.  Both arguments must be of the same
  * type.
  */
-extern float __attribute__((overloadable)) fdim(float, float);
+extern float __attribute__((const, overloadable)) fdim(float, float);
 FN_FUNC_FN_FN(fdim)
 
 /**
@@ -407,7 +407,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) floor(float);
+extern float __attribute__((const, overloadable)) floor(float);
 FN_FUNC_FN(floor)
 
 /**
@@ -415,7 +415,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) fma(float a, float b, float c);
+extern float __attribute__((const, overloadable)) fma(float a, float b, float c);
 FN_FUNC_FN_FN_FN(fma)
 
 /**
@@ -425,7 +425,7 @@
  * @param x: may be float, float2, float3, float4
  * @param y: may be float or vector.  If vector must match type of x.
  */
-extern float __attribute__((overloadable)) fmax(float x, float y);
+extern float __attribute__((const, overloadable)) fmax(float x, float y);
 FN_FUNC_FN_FN(fmax);
 FN_FUNC_FN_F(fmax);
 
@@ -435,7 +435,7 @@
  * @param x: may be float, float2, float3, float4
  * @param y: may be float or vector.  If vector must match type of x.
  */
-extern float __attribute__((overloadable)) fmin(float x, float y);
+extern float __attribute__((const, overloadable)) fmin(float x, float y);
 FN_FUNC_FN_FN(fmin);
 FN_FUNC_FN_F(fmin);
 
@@ -444,7 +444,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) fmod(float x, float y);
+extern float __attribute__((const, overloadable)) fmod(float x, float y);
 FN_FUNC_FN_FN(fmod)
 
 /**
@@ -453,7 +453,7 @@
  * @param iptr  iptr[0] will be set to the floor of the input value.
  * Supports float, float2, float3, float4.
  */
-_RS_RUNTIME float __attribute__((overloadable)) fract(float v, float *iptr);
+_RS_RUNTIME float __attribute__((pure, overloadable)) fract(float v, float *iptr);
 FN_FUNC_FN_PFN(fract)
 
 /**
@@ -461,22 +461,22 @@
  *
  * Supports float, float2, float3, float4.
  */
-static inline float __attribute__((overloadable)) fract(float v) {
+static inline float __attribute__((const, overloadable)) fract(float v) {
     float unused;
     return fract(v, &unused);
 }
 
-static inline float2 __attribute__((overloadable)) fract(float2 v) {
+static inline float2 __attribute__((const, overloadable)) fract(float2 v) {
     float2 unused;
     return fract(v, &unused);
 }
 
-static inline float3 __attribute__((overloadable)) fract(float3 v) {
+static inline float3 __attribute__((const, overloadable)) fract(float3 v) {
     float3 unused;
     return fract(v, &unused);
 }
 
-static inline float4 __attribute__((overloadable)) fract(float4 v) {
+static inline float4 __attribute__((const, overloadable)) fract(float4 v) {
     float4 unused;
     return fract(v, &unused);
 }
@@ -487,7 +487,7 @@
  * @param v Supports float, float2, float3, float4.
  * @param iptr  Must have the same vector size as v.
  */
-extern float __attribute__((overloadable)) frexp(float v, int *iptr);
+extern float __attribute__((pure, overloadable)) frexp(float v, int *iptr);
 FN_FUNC_FN_PIN(frexp)
 
 /**
@@ -495,7 +495,7 @@
  *
  * Supports float, float2, float3, float4.
  */
-extern float __attribute__((overloadable)) hypot(float x, float y);
+extern float __attribute__((const, overloadable)) hypot(float x, float y);
 FN_FUNC_FN_FN(hypot)
 
 /**
@@ -503,7 +503,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern int __attribute__((overloadable)) ilogb(float);
+extern int __attribute__((const, overloadable)) ilogb(float);
 IN_FUNC_FN(ilogb)
 
 /**
@@ -512,7 +512,7 @@
  * @param x Supports 1,2,3,4 components
  * @param y Supports single component or matching vector.
  */
-extern float __attribute__((overloadable)) ldexp(float x, int y);
+extern float __attribute__((const, overloadable)) ldexp(float x, int y);
 FN_FUNC_FN_IN(ldexp)
 FN_FUNC_FN_I(ldexp)
 
@@ -521,7 +521,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) lgamma(float);
+extern float __attribute__((const, overloadable)) lgamma(float);
 FN_FUNC_FN(lgamma)
 
 /**
@@ -530,7 +530,7 @@
  * @param x Supports 1,2,3,4 components
  * @param y Supports matching vector.
  */
-extern float __attribute__((overloadable)) lgamma(float x, int* y);
+extern float __attribute__((pure, overloadable)) lgamma(float x, int* y);
 FN_FUNC_FN_PIN(lgamma)
 
 /**
@@ -538,7 +538,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) log(float);
+extern float __attribute__((const, overloadable)) log(float);
 FN_FUNC_FN(log)
 
 /**
@@ -546,7 +546,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) log10(float);
+extern float __attribute__((const, overloadable)) log10(float);
 FN_FUNC_FN(log10)
 
 /**
@@ -554,7 +554,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) log2(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) log2(float v);
 FN_FUNC_FN(log2)
 
 /**
@@ -562,7 +562,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) log1p(float v);
+extern float __attribute__((const, overloadable)) log1p(float v);
 FN_FUNC_FN(log1p)
 
 /**
@@ -570,7 +570,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) logb(float);
+extern float __attribute__((const, overloadable)) logb(float);
 FN_FUNC_FN(logb)
 
 /**
@@ -578,7 +578,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) mad(float a, float b, float c);
+extern float __attribute__((const, overloadable)) mad(float a, float b, float c);
 FN_FUNC_FN_FN_FN(mad)
 
 /**
@@ -589,17 +589,17 @@
  * @param iret iret[0] will be set to the integral portion of the number.
  * @return The floating point portion of the value.
  */
-extern float __attribute__((overloadable)) modf(float x, float *iret);
+extern float __attribute__((pure, overloadable)) modf(float x, float *iret);
 FN_FUNC_FN_PFN(modf);
 
-extern float __attribute__((overloadable)) nan(uint);
+extern float __attribute__((const, overloadable)) nan(uint);
 
 /**
  * Return the next floating point number from x towards y.
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) nextafter(float x, float y);
+extern float __attribute__((const, overloadable)) nextafter(float x, float y);
 FN_FUNC_FN_FN(nextafter)
 
 /**
@@ -607,7 +607,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) pown(float v, int p);
+_RS_RUNTIME float __attribute__((const, overloadable)) pown(float v, int p);
 FN_FUNC_FN_IN(pown)
 
 /**
@@ -616,7 +616,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) powr(float v, float p);
+_RS_RUNTIME float __attribute__((const, overloadable)) powr(float v, float p);
 FN_FUNC_FN_FN(powr)
 
 /**
@@ -624,11 +624,11 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) remainder(float x, float y);
+extern float __attribute__((const, overloadable)) remainder(float x, float y);
 FN_FUNC_FN_FN(remainder)
 
 // document once we know the precision of bionic
-extern float __attribute__((overloadable)) remquo(float, float, int *);
+extern float __attribute__((pure, overloadable)) remquo(float, float, int *);
 FN_FUNC_FN_FN_PIN(remquo)
 
 /**
@@ -636,7 +636,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) rint(float);
+extern float __attribute__((const, overloadable)) rint(float);
 FN_FUNC_FN(rint)
 
 /**
@@ -644,7 +644,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) rootn(float v, int n);
+_RS_RUNTIME float __attribute__((const, overloadable)) rootn(float v, int n);
 FN_FUNC_FN_IN(rootn)
 
 /**
@@ -652,7 +652,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) round(float);
+extern float __attribute__((const, overloadable)) round(float);
 FN_FUNC_FN(round)
 
 /**
@@ -660,7 +660,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) sqrt(float);
+extern float __attribute__((const, overloadable)) sqrt(float);
 FN_FUNC_FN(sqrt)
 
 /**
@@ -668,7 +668,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) rsqrt(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) rsqrt(float v);
 FN_FUNC_FN(rsqrt)
 
 /**
@@ -677,7 +677,7 @@
  * @param v The incoming value in radians
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) sin(float v);
+extern float __attribute__((const, overloadable)) sin(float v);
 FN_FUNC_FN(sin)
 
 /**
@@ -689,7 +689,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) sincos(float v, float *cosptr);
+_RS_RUNTIME float __attribute__((pure, overloadable)) sincos(float v, float *cosptr);
 FN_FUNC_FN_PFN(sincos);
 
 /**
@@ -697,7 +697,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) sinh(float);
+extern float __attribute__((const, overloadable)) sinh(float);
 FN_FUNC_FN(sinh)
 
 /**
@@ -705,7 +705,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) sinpi(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) sinpi(float v);
 FN_FUNC_FN(sinpi)
 
 /**
@@ -714,7 +714,7 @@
  * Supports 1,2,3,4 components
  * @param v The incoming value in radians
  */
-extern float __attribute__((overloadable)) tan(float v);
+extern float __attribute__((const, overloadable)) tan(float v);
 FN_FUNC_FN(tan)
 
 /**
@@ -723,7 +723,7 @@
  * Supports 1,2,3,4 components
  * @param v The incoming value in radians
  */
-extern float __attribute__((overloadable)) tanh(float);
+extern float __attribute__((const, overloadable)) tanh(float);
 FN_FUNC_FN(tanh)
 
 /**
@@ -731,7 +731,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) tanpi(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) tanpi(float v);
 FN_FUNC_FN(tanpi)
 
 /**
@@ -739,7 +739,7 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) tgamma(float);
+extern float __attribute__((const, overloadable)) tgamma(float);
 FN_FUNC_FN(tgamma)
 
 /**
@@ -747,15 +747,15 @@
  *
  * Supports 1,2,3,4 components
  */
-extern float __attribute__((overloadable)) trunc(float);
+extern float __attribute__((const, overloadable)) trunc(float);
 FN_FUNC_FN(trunc)
 
 
-#define XN_FUNC_YN(typeout, fnc, typein)                                \
-extern typeout __attribute__((overloadable)) fnc(typein);               \
-_RS_RUNTIME typeout##2 __attribute__((overloadable)) fnc(typein##2 v);  \
-_RS_RUNTIME typeout##3 __attribute__((overloadable)) fnc(typein##3 v);  \
-_RS_RUNTIME typeout##4 __attribute__((overloadable)) fnc(typein##4 v);
+#define XN_FUNC_YN(typeout, fnc, typein)                                      \
+extern typeout __attribute__((const, overloadable)) fnc(typein);              \
+_RS_RUNTIME typeout##2 __attribute__((const, overloadable)) fnc(typein##2 v); \
+_RS_RUNTIME typeout##3 __attribute__((const, overloadable)) fnc(typein##3 v); \
+_RS_RUNTIME typeout##4 __attribute__((const, overloadable)) fnc(typein##4 v);
 
 #define UIN_FUNC_IN(fnc)          \
 XN_FUNC_YN(uchar, fnc, char)      \
@@ -771,14 +771,14 @@
 XN_FUNC_YN(int, fnc, int)
 
 
-#define XN_FUNC_XN_XN_BODY(type, fnc, body)         \
-_RS_RUNTIME type __attribute__((overloadable))      \
-        fnc(type v1, type v2);                      \
-_RS_RUNTIME type##2 __attribute__((overloadable))   \
-        fnc(type##2 v1, type##2 v2);                \
-_RS_RUNTIME type##3 __attribute__((overloadable))   \
-        fnc(type##3 v1, type##3 v2);                \
-_RS_RUNTIME type##4 __attribute__((overloadable))   \
+#define XN_FUNC_XN_XN_BODY(type, fnc, body)              \
+_RS_RUNTIME type __attribute__((const, overloadable))    \
+        fnc(type v1, type v2);                           \
+_RS_RUNTIME type##2 __attribute__((const, overloadable)) \
+        fnc(type##2 v1, type##2 v2);                     \
+_RS_RUNTIME type##3 __attribute__((const, overloadable)) \
+        fnc(type##3 v1, type##3 v2);                     \
+_RS_RUNTIME type##4 __attribute__((const, overloadable)) \
         fnc(type##4 v1, type##4 v2);
 
 #define IN_FUNC_IN_IN_BODY(fnc, body)   \
@@ -827,16 +827,41 @@
  * @param low Lower bound, must be scalar or matching vector.
  * @param high High bound, must match type of low
  */
-_RS_RUNTIME float __attribute__((overloadable)) clamp(float amount, float low, float high);
+
+#if !defined(RS_VERSION) || (RS_VERSION < 19)
+_RS_RUNTIME float __attribute__((const, overloadable)) clamp(float amount, float low, float high);
 FN_FUNC_FN_FN_FN(clamp)
 FN_FUNC_FN_F_F(clamp)
+#else
+#define _CLAMP(T)                                                                   \
+extern T __attribute__((overloadable)) clamp(T amount, T low, T high);              \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T##2 low, T##2 high);  \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T##3 low, T##3 high);  \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T##4 low, T##4 high);  \
+extern T##2 __attribute__((overloadable)) clamp(T##2 amount, T low, T high);        \
+extern T##3 __attribute__((overloadable)) clamp(T##3 amount, T low, T high);        \
+extern T##4 __attribute__((overloadable)) clamp(T##4 amount, T low, T high)
+
+_CLAMP(float);
+_CLAMP(double);
+_CLAMP(char);
+_CLAMP(uchar);
+_CLAMP(short);
+_CLAMP(ushort);
+_CLAMP(int);
+_CLAMP(uint);
+_CLAMP(long);
+_CLAMP(ulong);
+
+#undef _CLAMP
+#endif
 
 /**
  * Convert from radians to degrees.
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) degrees(float radians);
+_RS_RUNTIME float __attribute__((const, overloadable)) degrees(float radians);
 FN_FUNC_FN(degrees)
 
 /**
@@ -844,7 +869,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) mix(float start, float stop, float amount);
+_RS_RUNTIME float __attribute__((const, overloadable)) mix(float start, float stop, float amount);
 FN_FUNC_FN_FN_FN(mix)
 FN_FUNC_FN_FN_F(mix)
 
@@ -853,7 +878,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) radians(float degrees);
+_RS_RUNTIME float __attribute__((const, overloadable)) radians(float degrees);
 FN_FUNC_FN(radians)
 
 /**
@@ -864,18 +889,18 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) step(float edge, float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) step(float edge, float v);
 FN_FUNC_FN_FN(step)
 FN_FUNC_FN_F(step)
 
 // not implemented
-extern float __attribute__((overloadable)) smoothstep(float, float, float);
-extern float2 __attribute__((overloadable)) smoothstep(float2, float2, float2);
-extern float3 __attribute__((overloadable)) smoothstep(float3, float3, float3);
-extern float4 __attribute__((overloadable)) smoothstep(float4, float4, float4);
-extern float2 __attribute__((overloadable)) smoothstep(float, float, float2);
-extern float3 __attribute__((overloadable)) smoothstep(float, float, float3);
-extern float4 __attribute__((overloadable)) smoothstep(float, float, float4);
+extern float __attribute__((const, overloadable)) smoothstep(float, float, float);
+extern float2 __attribute__((const, overloadable)) smoothstep(float2, float2, float2);
+extern float3 __attribute__((const, overloadable)) smoothstep(float3, float3, float3);
+extern float4 __attribute__((const, overloadable)) smoothstep(float4, float4, float4);
+extern float2 __attribute__((const, overloadable)) smoothstep(float, float, float2);
+extern float3 __attribute__((const, overloadable)) smoothstep(float, float, float3);
+extern float4 __attribute__((const, overloadable)) smoothstep(float, float, float4);
 
 /**
  * Return the sign of a value.
@@ -886,7 +911,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) sign(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) sign(float v);
 FN_FUNC_FN(sign)
 
 /**
@@ -894,15 +919,15 @@
  *
  * Supports 3,4 components
  */
-_RS_RUNTIME float3 __attribute__((overloadable)) cross(float3 lhs, float3 rhs);
-_RS_RUNTIME float4 __attribute__((overloadable)) cross(float4 lhs, float4 rhs);
+_RS_RUNTIME float3 __attribute__((const, overloadable)) cross(float3 lhs, float3 rhs);
+_RS_RUNTIME float4 __attribute__((const, overloadable)) cross(float4 lhs, float4 rhs);
 
 /**
  * Compute the dot product of two vectors.
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) dot(float lhs, float rhs);
+_RS_RUNTIME float __attribute__((const, overloadable)) dot(float lhs, float rhs);
 F_FUNC_FN_FN(dot)
 
 /**
@@ -910,7 +935,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) length(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) length(float v);
 F_FUNC_FN(length)
 
 /**
@@ -918,7 +943,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) distance(float lhs, float rhs);
+_RS_RUNTIME float __attribute__((const, overloadable)) distance(float lhs, float rhs);
 F_FUNC_FN_FN(distance)
 
 /**
@@ -926,7 +951,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) normalize(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) normalize(float v);
 FN_FUNC_FN(normalize)
 
 
@@ -938,7 +963,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) half_recip(float);
+_RS_RUNTIME float __attribute__((const, overloadable)) half_recip(float);
 FN_FUNC_FN(half_recip)
 
 /**
@@ -946,7 +971,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) half_sqrt(float);
+_RS_RUNTIME float __attribute__((const, overloadable)) half_sqrt(float);
 FN_FUNC_FN(half_sqrt)
 
 /**
@@ -954,7 +979,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) half_rsqrt(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) half_rsqrt(float v);
 FN_FUNC_FN(half_rsqrt)
 
 /**
@@ -962,7 +987,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) fast_length(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) fast_length(float v);
 F_FUNC_FN(fast_length)
 
 /**
@@ -970,7 +995,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) fast_distance(float lhs, float rhs);
+_RS_RUNTIME float __attribute__((const, overloadable)) fast_distance(float lhs, float rhs);
 F_FUNC_FN_FN(fast_distance)
 
 /**
@@ -978,7 +1003,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) fast_normalize(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) fast_normalize(float v);
 F_FUNC_FN(fast_normalize)
 
 #endif  // (defined(RS_VERSION) && (RS_VERSION >= 17))
@@ -996,7 +1021,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) native_exp2(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_exp2(float v);
 FN_FUNC_FN(native_exp2)
 
 /**
@@ -1006,7 +1031,7 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) native_exp(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_exp(float v);
 FN_FUNC_FN(native_exp)
 
 /**
@@ -1016,21 +1041,21 @@
  *
  * Supports 1,2,3,4 components
  */
-_RS_RUNTIME float __attribute__((overloadable)) native_exp10(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_exp10(float v);
 FN_FUNC_FN(native_exp10)
 
 
-_RS_RUNTIME float __attribute__((overloadable)) native_log2(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_log2(float v);
 FN_FUNC_FN(native_log2)
 
-_RS_RUNTIME float __attribute__((overloadable)) native_log(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_log(float v);
 FN_FUNC_FN(native_log)
 
-_RS_RUNTIME float __attribute__((overloadable)) native_log10(float v);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_log10(float v);
 FN_FUNC_FN(native_log10)
 
 
-_RS_RUNTIME float __attribute__((overloadable)) native_powr(float v, float y);
+_RS_RUNTIME float __attribute__((const, overloadable)) native_powr(float v, float y);
 FN_FUNC_FN_FN(native_powr)
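
Most of the rs_cl.rsh churn above is attribute-only: declarations whose result depends solely on their arguments gain const, while the overloads that also hand back a secondary result through a pointer parameter (fract, frexp, modf, sincos, remquo, the two-argument lgamma) are marked pure instead. A small self-contained sketch of what the const hint buys, using hypothetical helpers rather than the runtime's own functions:

/* 'const': the result depends only on the arguments, so a compiler may
 * fold the two identical half_of(x) calls below into one.
 * 'pure': the weaker promise, also allowing reads of non-argument memory. */
static float g_scale = 2.0f;

__attribute__((const)) static float half_of(float v) { return 0.5f * v; }
__attribute__((pure))  static float scaled(float v)  { return g_scale * v; }

float demo(float x) {
    return half_of(x) + half_of(x) + scaled(x);
}
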
 
 
diff --git a/scriptc/rs_math.rsh b/scriptc/rs_math.rsh
index 73040b3..4d3124c 100644
--- a/scriptc/rs_math.rsh
+++ b/scriptc/rs_math.rsh
@@ -49,7 +49,7 @@
 /**
  * Returns the fractional part of a float
  */
-extern float __attribute__((overloadable))
+extern float __attribute__((const, overloadable))
     rsFrac(float);
 
 
@@ -64,28 +64,28 @@
  * @param low
  * @param high
  */
-_RS_RUNTIME uint __attribute__((overloadable, always_inline)) rsClamp(uint amount, uint low, uint high);
+_RS_RUNTIME uint __attribute__((const, overloadable, always_inline)) rsClamp(uint amount, uint low, uint high);
 
 /**
  * \overload
  */
-_RS_RUNTIME int __attribute__((overloadable, always_inline)) rsClamp(int amount, int low, int high);
+_RS_RUNTIME int __attribute__((const, overloadable, always_inline)) rsClamp(int amount, int low, int high);
 /**
  * \overload
  */
-_RS_RUNTIME ushort __attribute__((overloadable, always_inline)) rsClamp(ushort amount, ushort low, ushort high);
+_RS_RUNTIME ushort __attribute__((const, overloadable, always_inline)) rsClamp(ushort amount, ushort low, ushort high);
 /**
  * \overload
  */
-_RS_RUNTIME short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high);
+_RS_RUNTIME short __attribute__((const, overloadable, always_inline)) rsClamp(short amount, short low, short high);
 /**
  * \overload
  */
-_RS_RUNTIME uchar __attribute__((overloadable, always_inline)) rsClamp(uchar amount, uchar low, uchar high);
+_RS_RUNTIME uchar __attribute__((const, overloadable, always_inline)) rsClamp(uchar amount, uchar low, uchar high);
 /**
  * \overload
  */
-_RS_RUNTIME char __attribute__((overloadable, always_inline)) rsClamp(char amount, char low, char high);
+_RS_RUNTIME char __attribute__((const, overloadable, always_inline)) rsClamp(char amount, char low, char high);
 
 
 /**
@@ -202,7 +202,7 @@
  *
  * @return uchar4
  */
-_RS_RUNTIME uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b);
+_RS_RUNTIME uchar4 __attribute__((const, overloadable)) rsPackColorTo8888(float r, float g, float b);
 
 /**
  * Pack floating point (0-1) RGBA values into a uchar4.
@@ -214,7 +214,7 @@
  *
  * @return uchar4
  */
-_RS_RUNTIME uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a);
+_RS_RUNTIME uchar4 __attribute__((const, overloadable)) rsPackColorTo8888(float r, float g, float b, float a);
 
 /**
  * Pack floating point (0-1) RGB values into a uchar4.  The alpha component is
@@ -224,7 +224,7 @@
  *
  * @return uchar4
  */
-_RS_RUNTIME uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color);
+_RS_RUNTIME uchar4 __attribute__((const, overloadable)) rsPackColorTo8888(float3 color);
 
 /**
  * Pack floating point (0-1) RGBA values into a uchar4.
@@ -233,7 +233,7 @@
  *
  * @return uchar4
  */
-_RS_RUNTIME uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color);
+_RS_RUNTIME uchar4 __attribute__((const, overloadable)) rsPackColorTo8888(float4 color);
 
 /**
  * Unpack a uchar4 color to float4.  The resulting float range will be (0-1).
@@ -242,10 +242,10 @@
  *
  * @return float4
  */
-_RS_RUNTIME float4 rsUnpackColor8888(uchar4 c);
+_RS_RUNTIME float4 __attribute__((const)) rsUnpackColor8888(uchar4 c);
 
-_RS_RUNTIME uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v);
-_RS_RUNTIME float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v);
+_RS_RUNTIME uchar4 __attribute__((const, overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v);
+_RS_RUNTIME float4 __attribute__((const, overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v);
 
 
 #endif
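
The rs_math.rsh hunk applies the same attribute-only treatment to rsClamp and the color packing helpers, and the rs_cl.rsh change above adds RS_VERSION >= 19 integer and long clamp overloads through the _CLAMP(T) macro; none of this alters what the functions compute. For reference, a scalar clamp of the kind those declarations document can be sketched as follows (an illustrative helper, not the runtime's implementation):

/* Clamp 'amount' into [low, high]; rsClamp's integer overloads have this shape. */
static int clamp_scalar(int amount, int low, int high) {
    if (amount < low)  return low;
    if (amount > high) return high;
    return amount;
}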