Rename RenderScript -> Renderscript.

Change-Id: I187928033b47f3e3f4cb811a0b3562f479cfe417
diff --git a/lib/Renderscript/Android.mk b/lib/Renderscript/Android.mk
new file mode 100644
index 0000000..6989d5b
--- /dev/null
+++ b/lib/Renderscript/Android.mk
@@ -0,0 +1,72 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+LOCAL_PATH := $(call my-dir)
+
+#=====================================================================
+# Common: libbccRenderscript
+#=====================================================================
+
+libbcc_renderscript_SRC_FILES := \
+  RSCompiler.cpp \
+  RSCompilerDriver.cpp \
+  RSExecutable.cpp \
+  RSForEachExpand.cpp \
+  RSInfo.cpp \
+  RSInfoExtractor.cpp \
+  RSInfoReader.cpp \
+  RSInfoWriter.cpp \
+  RSScript.cpp
+
+#=====================================================================
+# Device Static Library: libbccRenderscript
+#=====================================================================
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libbccRenderscript
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+
+LOCAL_SRC_FILES := $(libbcc_renderscript_SRC_FILES)
+
+include $(LIBBCC_DEVICE_BUILD_MK)
+include $(LIBBCC_GEN_CONFIG_MK)
+include $(LLVM_DEVICE_BUILD_MK)
+include $(BUILD_STATIC_LIBRARY)
+
+
+#=====================================================================
+# Host Static Library: libbccRenderscript
+#=====================================================================
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libbccRenderscript
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_IS_HOST_MODULE := true
+
+LOCAL_SRC_FILES := $(libbcc_renderscript_SRC_FILES)
+
+include $(LIBBCC_HOST_BUILD_MK)
+include $(LIBBCC_GEN_CONFIG_MK)
+include $(LLVM_HOST_BUILD_MK)
+include $(BUILD_HOST_STATIC_LIBRARY)
+
+# Build Renderscript runtime (libclcore.bc)
+include $(LOCAL_PATH)/runtime/Android.mk
diff --git a/lib/Renderscript/RSCompiler.cpp b/lib/Renderscript/RSCompiler.cpp
new file mode 100644
index 0000000..7944c86
--- /dev/null
+++ b/lib/Renderscript/RSCompiler.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bcc/Renderscript/RSCompiler.h"
+
+#include <llvm/Module.h>
+#include <llvm/PassManager.h>
+#include <llvm/Transforms/IPO.h>
+
+#include "bcc/Renderscript/RSExecutable.h"
+#include "bcc/Renderscript/RSInfo.h"
+#include "bcc/Renderscript/RSScript.h"
+#include "bcc/Renderscript/RSTransforms.h"
+#include "bcc/Source.h"
+#include "bcc/Support/Log.h"
+
+using namespace bcc;
+
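+// Collect the symbols that must stay externally visible (special RS functions,
+// exported vars/funcs and the expanded foreach kernels) so that the
+// internalize pass added here does not hide them from the Renderscript
+// runtime, which looks them up by name at execution time.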
+bool RSCompiler::beforeAddLTOPasses(Script &pScript, llvm::PassManager &pPM) {
+  // Add a pass to internalize the symbols that don't need to have global
+  // visibility.
+  RSScript &script = static_cast<RSScript &>(pScript);
+  const RSInfo *info = script.getInfo();
+
+  // The vector contains the symbols that should not be internalized.
+  std::vector<const char *> export_symbols;
+
+  // Special RS functions should always be global symbols.
+  const char **special_functions = RSExecutable::SpecialFunctionNames;
+  while (*special_functions != NULL) {
+    export_symbols.push_back(*special_functions);
+    special_functions++;
+  }
+
+  // The visibility of symbols appearing in rs_export_var and rs_export_func
+  // should also be preserved.
+  const RSInfo::ExportVarNameListTy &export_vars = info->getExportVarNames();
+  const RSInfo::ExportFuncNameListTy &export_funcs = info->getExportFuncNames();
+
+  for (RSInfo::ExportVarNameListTy::const_iterator
+           export_var_iter = export_vars.begin(),
+           export_var_end = export_vars.end();
+       export_var_iter != export_var_end; export_var_iter++) {
+    export_symbols.push_back(*export_var_iter);
+  }
+
+  for (RSInfo::ExportFuncNameListTy::const_iterator
+           export_func_iter = export_funcs.begin(),
+           export_func_end = export_funcs.end();
+       export_func_iter != export_func_end; export_func_iter++) {
+    export_symbols.push_back(*export_func_iter);
+  }
+
+  // Expanded foreach functions should not be internalized either.
+  const RSInfo::ExportForeachFuncListTy &export_foreach_func =
+      info->getExportForeachFuncs();
+  std::vector<std::string> expanded_foreach_funcs;
+  for (RSInfo::ExportForeachFuncListTy::const_iterator
+           foreach_func_iter = export_foreach_func.begin(),
+           foreach_func_end = export_foreach_func.end();
+       foreach_func_iter != foreach_func_end; foreach_func_iter++) {
+    std::string name(foreach_func_iter->first);
+    expanded_foreach_funcs.push_back(name.append(".expand"));
+  }
+
+  // Wait until expanded_foreach_funcs is fully populated before taking the
+  // c_str() pointers, since later push_back calls could invalidate them.
+  for (size_t i = 0; i < expanded_foreach_funcs.size(); i++) {
+    export_symbols.push_back(expanded_foreach_funcs[i].c_str());
+  }
+
+  pPM.add(llvm::createInternalizePass(export_symbols));
+
+  return true;
+}
+
+bool RSCompiler::beforeExecuteLTOPasses(Script &pScript,
+                                        llvm::PassManager &pPM) {
+  // Execute a pass to expand foreach-able functions
+  llvm::PassManager rs_passes;
+
+  // The Script passed to RSCompiler must be an RSScript.
+  RSScript &script = static_cast<RSScript &>(pScript);
+  const RSInfo *info = script.getInfo();
+  llvm::Module &module = script.getSource().getModule();
+
+  if (info == NULL) {
+    ALOGE("Missing RSInfo in RSScript to run the pass for foreach expansion on "
+          "%s!", module.getModuleIdentifier().c_str());
+    return false;
+  }
+
+  // Expand ForEach on CPU path to reduce launch overhead.
+  rs_passes.add(createRSForEachExpandPass(info->getExportForeachFuncs()));
+
+  // Execute the pass.
+  rs_passes.run(module);
+
+  return true;
+}
diff --git a/lib/Renderscript/RSCompilerDriver.cpp b/lib/Renderscript/RSCompilerDriver.cpp
new file mode 100644
index 0000000..c854b69
--- /dev/null
+++ b/lib/Renderscript/RSCompilerDriver.cpp
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bcc/Renderscript/RSCompilerDriver.h"
+
+#include <llvm/Support/Path.h>
+
+#include "bcinfo/BitcodeWrapper.h"
+
+#include "bcc/Renderscript/RSExecutable.h"
+#include "bcc/Renderscript/RSScript.h"
+#include "bcc/Support/CompilerConfig.h"
+#include "bcc/Support/TargetCompilerConfigs.h"
+#include "bcc/Source.h"
+#include "bcc/Support/FileMutex.h"
+#include "bcc/Support/Log.h"
+#include "bcc/Support/InputFile.h"
+#include "bcc/Support/Initialization.h"
+#include "bcc/Support/Sha1Util.h"
+#include "bcc/Support/OutputFile.h"
+
+#include <cutils/properties.h>
+#include <utils/String8.h>
+#include <utils/StopWatch.h>
+
+using namespace bcc;
+
+namespace {
+
+bool is_force_recompile() {
+  char buf[PROPERTY_VALUE_MAX];
+
+  // Re-compile if floating point precision has been overridden.
+  property_get("debug.rs.precision", buf, "");
+  if (buf[0] != '\0') {
+    return true;
+  }
+
+  // Re-compile if debug.rs.forcerecompile is set.
+  property_get("debug.rs.forcerecompile", buf, "0");
+  if ((::strcmp(buf, "1") == 0) || (::strcmp(buf, "true") == 0)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+} // end anonymous namespace
+
+RSCompilerDriver::RSCompilerDriver() : mConfig(NULL), mCompiler() {
+  init::Initialize();
+  // Chain the symbol resolvers for BCC runtimes and RS runtimes.
+  mResolver.chainResolver(mBCCRuntime);
+  mResolver.chainResolver(mRSRuntime);
+}
+
+RSCompilerDriver::~RSCompilerDriver() {
+  delete mConfig;
+}
+
+RSExecutable *
+RSCompilerDriver::loadScriptCache(const char *pOutputPath,
+                                  const RSInfo::DependencyTableTy &pDeps) {
+  android::StopWatch load_time("bcc: RSCompilerDriver::loadScriptCache time");
+  RSExecutable *result = NULL;
+
+  if (is_force_recompile())
+    return NULL;
+
+  //===--------------------------------------------------------------------===//
+  // Acquire the read lock for reading output object file.
+  //===--------------------------------------------------------------------===//
+  FileMutex<FileBase::kReadLock> read_output_mutex(pOutputPath);
+
+  if (read_output_mutex.hasError() || !read_output_mutex.lock()) {
+    ALOGE("Unable to acquire the read lock for %s! (%s)", pOutputPath,
+          read_output_mutex.getErrorMessage().c_str());
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Read the output object file.
+  //===--------------------------------------------------------------------===//
+  InputFile *output_file = new (std::nothrow) InputFile(pOutputPath);
+
+  if ((output_file == NULL) || output_file->hasError()) {
+    ALOGE("Unable to open %s for read! (%s)", pOutputPath, (output_file ?
+          output_file->getErrorMessage().c_str() : "out of memory"));
+    delete output_file;
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Acquire the read lock on output_file for reading its RS info file.
+  //===--------------------------------------------------------------------===//
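+  // The info file lives next to the output object file, with an ".info"
+  // suffix appended to its name (see RSInfo::GetPath).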
+  android::String8 info_path = RSInfo::GetPath(*output_file);
+
+  if (!output_file->lock()) {
+    ALOGE("Unable to acquire the read lock on %s for reading %s! (%s)",
+          pOutputPath, info_path.string(),
+          output_file->getErrorMessage().c_str());
+    delete output_file;
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Open and load the RS info file.
+  //===--------------------------------------------------------------------===//
+  InputFile info_file(info_path.string());
+  RSInfo *info = RSInfo::ReadFromFile(info_file, pDeps);
+
+  // Release the lock on output_file.
+  output_file->unlock();
+
+  if (info == NULL) {
+    delete output_file;
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Create the RSExecutable.
+  //===--------------------------------------------------------------------===//
+  result = RSExecutable::Create(*info, *output_file, mResolver);
+  if (result == NULL) {
+    delete output_file;
+    delete info;
+    return NULL;
+  }
+
+  return result;
+}
+
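+// Returns true when the compiler configuration was created or changed, i.e.
+// when the compiler needs to be (re-)configured before the next compile.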
+bool RSCompilerDriver::setupConfig(const RSScript &pScript) {
+  bool changed = false;
+
+  const llvm::CodeGenOpt::Level script_opt_level =
+      static_cast<llvm::CodeGenOpt::Level>(pScript.getOptimizationLevel());
+
+  if (mConfig != NULL) {
+    // The Renderscript bitcode may specify an optimization level different
+    // from the one used in the previous compilation.
+    if (mConfig->getOptimizationLevel() != script_opt_level) {
+      mConfig->setOptimizationLevel(script_opt_level);
+      changed = true;
+    }
+  } else {
+    // The compiler has not been configured yet.
+    mConfig = new (std::nothrow) DefaultCompilerConfig();
+    if (mConfig == NULL) {
+      // Out of memory; return false since mConfig remains NULL.
+      return false;
+    }
+    mConfig->setOptimizationLevel(script_opt_level);
+    changed = true;
+  }
+
+#if defined(DEFAULT_ARM_CODEGEN)
+  // NEON should be disabled when full-precision floating point is required.
+  assert((pScript.getInfo() != NULL) && "NULL RS info!");
+  if (pScript.getInfo()->getFloatPrecisionRequirement() == RSInfo::FP_Full) {
+    // Must be ARMCompilerConfig.
+    ARMCompilerConfig *arm_config = static_cast<ARMCompilerConfig *>(mConfig);
+    changed |= arm_config->enableNEON(/* pEnable */false);
+  }
+#endif
+
+  return changed;
+}
+
+RSExecutable *
+RSCompilerDriver::compileScript(RSScript &pScript,
+                                const char* pScriptName,
+                                const char *pOutputPath,
+                                const RSInfo::DependencyTableTy &pDeps) {
+  android::StopWatch compile_time("bcc: RSCompilerDriver::compileScript time");
+  RSExecutable *result = NULL;
+  RSInfo *info = NULL;
+
+  //===--------------------------------------------------------------------===//
+  // Extract RS-specific information from source bitcode.
+  //===--------------------------------------------------------------------===//
+  // The RS info may contain configuration for the compiler (such as
+  // #optimization_level), therefore it should be extracted before compilation.
+  info = RSInfo::ExtractFromSource(pScript.getSource(), pDeps);
+  if (info == NULL) {
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Associate script with its info
+  //===--------------------------------------------------------------------===//
+  // This is required since the RS compiler may need information in the info
+  // file to perform transformations (e.g., expanding foreach-able functions).
+  pScript.setInfo(info);
+
+  //===--------------------------------------------------------------------===//
+  // Link RS script with Renderscript runtime.
+  //===--------------------------------------------------------------------===//
+  if (!RSScript::LinkRuntime(pScript)) {
+    ALOGE("Failed to link script '%s' with Renderscript runtime!", pScriptName);
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Acquire the write lock for writing output object file.
+  //===--------------------------------------------------------------------===//
+  FileMutex<FileBase::kWriteLock> write_output_mutex(pOutputPath);
+
+  if (write_output_mutex.hasError() || !write_output_mutex.lock()) {
+    ALOGE("Unable to acquire the lock for writing %s! (%s)",
+          pOutputPath, write_output_mutex.getErrorMessage().c_str());
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Open the output file for write.
+  //===--------------------------------------------------------------------===//
+  OutputFile *output_file =
+      new (std::nothrow) OutputFile(pOutputPath, FileBase::kTruncate);
+
+  if ((output_file == NULL) || output_file->hasError()) {
+    ALOGE("Unable to open %s for write! (%s)", pOutputPath, (output_file ?
+          output_file->getErrorMessage().c_str() : "out of memory"));
+    delete info;
+    delete output_file;
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Setup the config to the compiler.
+  //===--------------------------------------------------------------------===//
+  bool compiler_need_reconfigure = setupConfig(pScript);
+
+  if (mConfig == NULL) {
+    ALOGE("Failed to setup config for RS compiler to compile %s!", pOutputPath);
+    delete info;
+    delete output_file;
+    return NULL;
+  }
+
+  // The compiler needs to be (re-)configured if config() has never been run
+  // or if the configuration it references has changed.
+  if (compiler_need_reconfigure) {
+    Compiler::ErrorCode err = mCompiler.config(*mConfig);
+    if (err != Compiler::kSuccess) {
+      ALOGE("Failed to config the RS compiler for %s! (%s)",pOutputPath,
+            Compiler::GetErrorString(err));
+      delete info;
+      delete output_file;
+      return NULL;
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Run the compiler.
+  //===--------------------------------------------------------------------===//
+  Compiler::ErrorCode compile_result = mCompiler.compile(pScript, *output_file);
+  if (compile_result != Compiler::kSuccess) {
+    ALOGE("Unable to compile the source to file %s! (%s)", pOutputPath,
+          Compiler::GetErrorString(compile_result));
+    delete info;
+    delete output_file;
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Create the RSExecutable.
+  //===--------------------------------------------------------------------===//
+  result = RSExecutable::Create(*info, *output_file, mResolver);
+  if (result == NULL) {
+    delete info;
+    delete output_file;
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Dump the disassembly for debugging when possible.
+  //===--------------------------------------------------------------------===//
+#if USE_DISASSEMBLER
+  OutputFile *disassembly_output =
+      new (std::nothrow) OutputFile(DEBUG_DISASSEMBLER_FILE,
+                                    FileBase::kAppend);
+
+  if (disassembly_output != NULL) {
+    result->dumpDisassembly(*disassembly_output);
+    delete disassembly_output;
+  }
+#endif
+
+  //===--------------------------------------------------------------------===//
+  // Write out the RS info file.
+  //===--------------------------------------------------------------------===//
+  // Note that write failure only results in a warning since the source is
+  // successfully compiled and loaded.
+  if (!result->syncInfo(/* pForce */true)) {
+    ALOGW("%s was successfully compiled and loaded but its RS info file failed "
+          "to write out!", pOutputPath);
+  }
+
+  return result;
+}
+
+RSExecutable *RSCompilerDriver::build(BCCContext &pContext,
+                                      const char *pCacheDir,
+                                      const char *pResName,
+                                      const char *pBitcode,
+                                      size_t pBitcodeSize) {
+  android::StopWatch build_time("bcc: RSCompilerDriver::build time");
+  //===--------------------------------------------------------------------===//
+  // Check parameters.
+  //===--------------------------------------------------------------------===//
+  if ((pCacheDir == NULL) || (pResName == NULL)) {
+    ALOGE("Invalid parameter passed to RSCompilerDriver::build()! (cache dir: "
+          "%s, resource name: %s)", ((pCacheDir) ? pCacheDir : "(null)"),
+                                    ((pResName) ? pResName : "(null)"));
+    return NULL;
+  }
+
+  if ((pBitcode == NULL) || (pBitcodeSize <= 0)) {
+    ALOGE("No bitcode supplied! (bitcode: %p, size of bitcode: %u)",
+          pBitcode, static_cast<unsigned>(pBitcodeSize));
+    return NULL;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Prepare dependency information.
+  //===--------------------------------------------------------------------===//
+  RSInfo::DependencyTableTy dep_info;
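+  // Record the script bitcode itself as a dependency: a 20-byte SHA-1 digest
+  // keyed by the resource name, later used to validate the cached object.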
+  uint8_t bitcode_sha1[20];
+  Sha1Util::GetSHA1DigestFromBuffer(bitcode_sha1, pBitcode, pBitcodeSize);
+  dep_info.push(std::make_pair(pResName, bitcode_sha1));
+
+  //===--------------------------------------------------------------------===//
+  // Construct output path.
+  //===--------------------------------------------------------------------===//
+  llvm::sys::Path output_path(pCacheDir);
+
+  // {pCacheDir}/{pResName}
+  if (!output_path.appendComponent(pResName)) {
+    ALOGE("Failed to construct output path %s/%s!", pCacheDir, pResName);
+    return NULL;
+  }
+
+  // {pCacheDir}/{pResName}.o
+  output_path.appendSuffix("o");
+
+  //===--------------------------------------------------------------------===//
+  // Load cache.
+  //===--------------------------------------------------------------------===//
+  RSExecutable *result = loadScriptCache(output_path.c_str(), dep_info);
+
+  if (result != NULL) {
+    // Cache hit
+    return result;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Load the bitcode and create script.
+  //===--------------------------------------------------------------------===//
+  Source *source = Source::CreateFromBuffer(pContext, pResName,
+                                            pBitcode, pBitcodeSize);
+  if (source == NULL) {
+    return NULL;
+  }
+
+  RSScript *script = new (std::nothrow) RSScript(*source);
+  if (script == NULL) {
+    ALOGE("Out of memory when create Script object for '%s'! (output: %s)",
+          pResName, output_path.c_str());
+    delete source;
+    return NULL;
+  }
+
+  // Read information from bitcode wrapper.
+  bcinfo::BitcodeWrapper wrapper(pBitcode, pBitcodeSize);
+  script->setCompilerVersion(wrapper.getCompilerVersion());
+  script->setOptimizationLevel(static_cast<RSScript::OptimizationLevel>(
+                                   wrapper.getOptimizationLevel()));
+
+  //===--------------------------------------------------------------------===//
+  // Compile the script
+  //===--------------------------------------------------------------------===//
+  result = compileScript(*script, pResName, output_path.c_str(), dep_info);
+
+  // Script is no longer used. Free it to get more memory.
+  delete script;
+
+  if (result == NULL) {
+    return NULL;
+  }
+
+  return result;
+}
diff --git a/lib/Renderscript/RSExecutable.cpp b/lib/Renderscript/RSExecutable.cpp
new file mode 100644
index 0000000..c73fd5e
--- /dev/null
+++ b/lib/Renderscript/RSExecutable.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bcc/Renderscript/RSExecutable.h"
+
+#include "bcc/Config/Config.h"
+#include "bcc/Support/Disassembler.h"
+#include "bcc/Support/FileBase.h"
+#include "bcc/Support/Log.h"
+#include "bcc/Support/OutputFile.h"
+#include "bcc/ExecutionEngine/SymbolResolverProxy.h"
+
+#include <utils/String8.h>
+
+using namespace bcc;
+
+const char *RSExecutable::SpecialFunctionNames[] = {
+  "root",
+  "init",
+  ".rs.dtor",
+  // Must be NULL-terminated.
+  NULL
+};
+
+RSExecutable *RSExecutable::Create(RSInfo &pInfo,
+                                   FileBase &pObjFile,
+                                   SymbolResolverProxy &pResolver) {
+  // Load the object file. Enable GDB JIT debugging if the script contains
+  // debug information.
+  ObjectLoader *loader = ObjectLoader::Load(pObjFile,
+                                            pResolver,
+                                            pInfo.hasDebugInformation());
+  if (loader == NULL) {
+    return NULL;
+  }
+
+  // Now, all things required to build an RSExecutable object are ready.
+  RSExecutable *result = new (std::nothrow) RSExecutable(pInfo,
+                                                         pObjFile,
+                                                         *loader);
+  if (result == NULL) {
+    ALOGE("Out of memory when create object to hold RS result file for %s!",
+          pObjFile.getName().c_str());
+    return NULL;
+  }
+
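+  // The addresses of export vars/funcs/foreach kernels are looked up by name
+  // in the loaded object and stored in the same order as the corresponding
+  // name lists in RSInfo, so callers can index them by slot number.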
+  unsigned idx;
+  // Resolve addresses of RS export vars.
+  idx = 0;
+  const RSInfo::ExportVarNameListTy &export_var_names =
+      pInfo.getExportVarNames();
+  for (RSInfo::ExportVarNameListTy::const_iterator
+           var_iter = export_var_names.begin(),
+           var_end = export_var_names.end(); var_iter != var_end;
+       var_iter++, idx++) {
+    const char *name = *var_iter;
+    void *addr = result->getSymbolAddress(name);
+    if (addr == NULL) {
+      ALOGW("RS export var at entry #%u named %s cannot be found in the result "
+            "object!", idx, name);
+    }
+    result->mExportVarAddrs.push_back(addr);
+  }
+
+  // Resolve addresses of RS export functions.
+  idx = 0;
+  const RSInfo::ExportFuncNameListTy &export_func_names =
+      pInfo.getExportFuncNames();
+  for (RSInfo::ExportFuncNameListTy::const_iterator
+           func_iter = export_func_names.begin(),
+           func_end = export_func_names.end(); func_iter != func_end;
+       func_iter++, idx++) {
+    const char *name = *func_iter;
+    void *addr = result->getSymbolAddress(name);
+    if (addr == NULL) {
+      ALOGW("RS export func at entry #%u named %s cannot be found in the result"
+            " object!", idx, name);
+    }
+    result->mExportFuncAddrs.push_back(addr);
+  }
+
+  // Resolve addresses of expanded RS foreach functions.
+  idx = 0;
+  const RSInfo::ExportForeachFuncListTy &export_foreach_funcs =
+      pInfo.getExportForeachFuncs();
+  for (RSInfo::ExportForeachFuncListTy::const_iterator
+           foreach_iter = export_foreach_funcs.begin(),
+           foreach_end = export_foreach_funcs.end();
+       foreach_iter != foreach_end; foreach_iter++, idx++) {
+    const char *func_name = foreach_iter->first;
+    android::String8 expanded_func_name(func_name);
+    expanded_func_name.append(".expand");
+    void *addr = result->getSymbolAddress(expanded_func_name.string());
+    if (addr == NULL) {
+      ALOGW("Expanded RS foreach at entry #%u named %s cannot be found in the "
+            "result object!", idx, expanded_func_name.string());
+    }
+    result->mExportForeachFuncAddrs.push_back(addr);
+  }
+
+  // Copy pragma key/value pairs from RSInfo::getPragmas() into mPragmaKeys and
+  // mPragmaValues, respectively.
+  const RSInfo::PragmaListTy &pragmas = pInfo.getPragmas();
+  for (RSInfo::PragmaListTy::const_iterator pragma_iter = pragmas.begin(),
+          pragma_end = pragmas.end(); pragma_iter != pragma_end;
+       pragma_iter++){
+    result->mPragmaKeys.push_back(pragma_iter->first);
+    result->mPragmaValues.push_back(pragma_iter->second);
+  }
+
+  return result;
+}
+
+bool RSExecutable::syncInfo(bool pForce) {
+  if (!pForce && !mIsInfoDirty) {
+    return true;
+  }
+
+  android::String8 info_path = RSInfo::GetPath(*mObjFile);
+  OutputFile info_file(info_path.string(), FileBase::kTruncate);
+
+  if (info_file.hasError()) {
+    ALOGE("Failed to open the info file %s for write! (%s)", info_path.string(),
+          info_file.getErrorMessage().c_str());
+    return false;
+  }
+
+  // Operations on the RS info file need to acquire the lock on the output
+  // file first.
+  if (!mObjFile->lock(FileBase::kWriteLock)) {
+    ALOGE("Write to RS info file %s required the acquisition of the write lock "
+          "on %s but got failure! (%s)", info_path.string(),
+          mObjFile->getName().c_str(), info_file.getErrorMessage().c_str());
+    return false;
+  }
+
+  // Perform the write.
+  if (!mInfo->write(info_file)) {
+    ALOGE("Failed to sync the RS info file %s!", info_path.string());
+    mObjFile->unlock();
+    return false;
+  }
+
+  mObjFile->unlock();
+  mIsInfoDirty = false;
+  return true;
+}
+
+void RSExecutable::dumpDisassembly(OutputFile &pOutput) const {
+#if DEBUG_MC_DISASSEMBLER
+  if (pOutput.hasError()) {
+    return;
+  }
+
+  // Get the list of function names emitted by the MC code generator.
+  android::Vector<const char *> func_list;
+
+  if (!mLoader->getSymbolNameList(func_list, ObjectLoader::kFunctionType)) {
+    ALOGW("Failed to get the list of function name in %s for disassembly!",
+          mObjFile->getName().c_str());
+  } else {
+    // Disassemble each function
+    for (size_t i = 0, e = func_list.size(); i != e; i++) {
+      const char* func_name = func_list[i];
+      void *func = mLoader->getSymbolAddress(func_name);
+      size_t func_size = mLoader->getSymbolSize(func_name);
+
+      if (func == NULL) {
+        continue;
+      }
+      DisassembleResult result =
+          Disassemble(pOutput, DEFAULT_TARGET_TRIPLE_STRING, func_name,
+                      reinterpret_cast<const uint8_t *>(func), func_size);
+
+      if (result != kDisassembleSuccess) {
+        ALOGW("Failed to disassemble the function %s in %s (error code=%zu)!",
+              func_name, mObjFile->getName().c_str(), static_cast<size_t>(result));
+
+        if (result != kDisassembleInvalidInstruction) {
+          ALOGW("And the error occured in disassembler is fatal. Abort "
+                "disassembler on remaining functions!");
+          break;
+        }
+      }
+    }
+  }
+#endif
+  return;
+}
+
+RSExecutable::~RSExecutable() {
+  syncInfo();
+  delete mInfo;
+  delete mObjFile;
+  delete mLoader;
+}
diff --git a/lib/Renderscript/RSForEachExpand.cpp b/lib/Renderscript/RSForEachExpand.cpp
new file mode 100644
index 0000000..dfc358e
--- /dev/null
+++ b/lib/Renderscript/RSForEachExpand.cpp
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define RS_FOREACH_EXPAND_PASS_NDEBUG 0
+#include "bcc/Renderscript/RSTransforms.h"
+
+#include <cstdlib>
+
+#include <llvm/DerivedTypes.h>
+#include <llvm/Function.h>
+#include <llvm/Instructions.h>
+#include <llvm/Module.h>
+#include <llvm/Pass.h>
+#include <llvm/Support/IRBuilder.h>
+#include <llvm/Type.h>
+
+#include "bcc/Config/Config.h"
+#include "bcc/Renderscript/RSInfo.h"
+#include "bcc/Support/Log.h"
+
+using namespace bcc;
+
+namespace {
+
+/* RSForEachExpandPass - This pass operates on functions that are able to be
+ * called via rsForEach() or "foreach_<NAME>". We create an inner loop for the
+ * ForEach-able function to be invoked over the appropriate data cells of the
+ * input/output allocations (adjusting other relevant parameters as we go). We
+ * support doing this for any ForEach-able compute kernels. The new function
+ * name is the original function name followed by ".expand". Note that we
+ * still generate code for the original function.
+ */
+class RSForEachExpandPass : public llvm::ModulePass {
+private:
+  static char ID;
+
+  llvm::Module *M;
+  llvm::LLVMContext *C;
+
+  const RSInfo::ExportForeachFuncListTy &mFuncs;
+
+  uint32_t getRootSignature(llvm::Function *F) {
+    const llvm::NamedMDNode *ExportForEachMetadata =
+        M->getNamedMetadata("#rs_export_foreach");
+
+    if (!ExportForEachMetadata) {
+      llvm::SmallVector<llvm::Type*, 8> RootArgTys;
+      for (llvm::Function::arg_iterator B = F->arg_begin(),
+                                        E = F->arg_end();
+           B != E;
+           ++B) {
+        RootArgTys.push_back(B->getType());
+      }
+
+      // For pre-ICS bitcode, we may not have signature information. In that
+      // case, we use the size of the RootArgTys to select the number of
+      // arguments.
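+      // For example, a legacy root() taking (in, out, usr) yields
+      // (1 << 3) - 1 == 0b111, i.e. the in, out and usrData bits set.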
+      return (1 << RootArgTys.size()) - 1;
+    }
+
+#if !RS_FOREACH_EXPAND_PASS_NDEBUG
+    if (ExportForEachMetadata->getNumOperands() <= 0) {
+      ALOGE("Assert failed at %s:%d: Invalid #rs_export_foreach metadata in "
+            " '%s'!", __FILE__, __LINE__, M->getModuleIdentifier().c_str());
+      ::abort();
+    }
+#endif
+
+    // We only handle the case for legacy root() functions here, so this is
+    // hard-coded to look at only the first such function.
+    llvm::MDNode *SigNode = ExportForEachMetadata->getOperand(0);
+    if (SigNode != NULL && SigNode->getNumOperands() == 1) {
+      llvm::Value *SigVal = SigNode->getOperand(0);
+      if (SigVal->getValueID() == llvm::Value::MDStringVal) {
+        llvm::StringRef SigString =
+            static_cast<llvm::MDString*>(SigVal)->getString();
+        uint32_t Signature = 0;
+        if (SigString.getAsInteger(10, Signature)) {
+          ALOGE("Non-integer signature value '%s'", SigString.str().c_str());
+          return 0;
+        }
+        return Signature;
+      }
+    }
+
+    return 0;
+  }
+
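+  // The export-foreach signature is a bitfield describing which parameters
+  // the kernel takes: bit 0 = in, bit 1 = out, bit 2 = usrData, bit 3 = x,
+  // bit 4 = y.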
+  static bool hasIn(uint32_t Signature) {
+    return Signature & 1;
+  }
+
+  static bool hasOut(uint32_t Signature) {
+    return Signature & 2;
+  }
+
+  static bool hasUsrData(uint32_t Signature) {
+    return Signature & 4;
+  }
+
+  static bool hasX(uint32_t Signature) {
+    return Signature & 8;
+  }
+
+  static bool hasY(uint32_t Signature) {
+    return Signature & 16;
+  }
+
+public:
+  RSForEachExpandPass(const RSInfo::ExportForeachFuncListTy &pForeachFuncs)
+      : ModulePass(ID), M(NULL), C(NULL), mFuncs(pForeachFuncs) {
+  }
+
+  /* Performs the actual optimization on a selected function. On success, the
+   * Module will contain a new function of the name "<NAME>.expand" that
+   * invokes <NAME>() in a loop with the appropriate parameters.
+   */
+  bool ExpandFunction(llvm::Function *F, uint32_t Signature) {
+    ALOGV("Expanding ForEach-able Function %s", F->getName().str().c_str());
+
+    if (!Signature) {
+      Signature = getRootSignature(F);
+      if (!Signature) {
+        // We couldn't determine how to expand this function based on its
+        // function signature.
+        return false;
+      }
+    }
+
+    llvm::Type *VoidPtrTy = llvm::Type::getInt8PtrTy(*C);
+    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(*C);
+    llvm::Type *SizeTy = Int32Ty;
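+    // Note: size_t and pointers are treated as 32-bit values here; the
+    // pointer-stepping arithmetic below (ptrtoint/add/inttoptr on Int32Ty)
+    // relies on that assumption.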
+
+    /* Defined in frameworks/base/libs/rs/rs_hal.h:
+     *
+     * struct RsForEachStubParamStruct {
+     *   const void *in;
+     *   void *out;
+     *   const void *usr;
+     *   size_t usr_len;
+     *   uint32_t x;
+     *   uint32_t y;
+     *   uint32_t z;
+     *   uint32_t lod;
+     *   enum RsAllocationCubemapFace face;
+     *   uint32_t ar[16];
+     * };
+     */
+    llvm::SmallVector<llvm::Type*, 9> StructTys;
+    StructTys.push_back(VoidPtrTy);  // const void *in
+    StructTys.push_back(VoidPtrTy);  // void *out
+    StructTys.push_back(VoidPtrTy);  // const void *usr
+    StructTys.push_back(SizeTy);     // size_t usr_len
+    StructTys.push_back(Int32Ty);    // uint32_t x
+    StructTys.push_back(Int32Ty);    // uint32_t y
+    StructTys.push_back(Int32Ty);    // uint32_t z
+    StructTys.push_back(Int32Ty);    // uint32_t lod
+    StructTys.push_back(Int32Ty);    // enum RsAllocationCubemapFace
+    StructTys.push_back(llvm::ArrayType::get(Int32Ty, 16));  // uint32_t ar[16]
+
+    llvm::Type *ForEachStubPtrTy = llvm::StructType::create(
+        StructTys, "RsForEachStubParamStruct")->getPointerTo();
+
+    /* Create the function signature for our expanded function.
+     * void (const RsForEachStubParamStruct *p, uint32_t x1, uint32_t x2,
+     *       uint32_t instep, uint32_t outstep)
+     */
+    llvm::SmallVector<llvm::Type*, 8> ParamTys;
+    ParamTys.push_back(ForEachStubPtrTy);  // const RsForEachStubParamStruct *p
+    ParamTys.push_back(Int32Ty);           // uint32_t x1
+    ParamTys.push_back(Int32Ty);           // uint32_t x2
+    ParamTys.push_back(Int32Ty);           // uint32_t instep
+    ParamTys.push_back(Int32Ty);           // uint32_t outstep
+
+    llvm::FunctionType *FT =
+        llvm::FunctionType::get(llvm::Type::getVoidTy(*C), ParamTys, false);
+    llvm::Function *ExpandedFunc =
+        llvm::Function::Create(FT,
+                               llvm::GlobalValue::ExternalLinkage,
+                               F->getName() + ".expand", M);
+
+    // Create and name the actual arguments to this expanded function.
+    llvm::SmallVector<llvm::Argument*, 8> ArgVec;
+    for (llvm::Function::arg_iterator B = ExpandedFunc->arg_begin(),
+                                      E = ExpandedFunc->arg_end();
+         B != E;
+         ++B) {
+      ArgVec.push_back(B);
+    }
+
+    if (ArgVec.size() != 5) {
+      ALOGE("Incorrect number of arguments to function: %zu",
+            ArgVec.size());
+      return false;
+    }
+    llvm::Value *Arg_p = ArgVec[0];
+    llvm::Value *Arg_x1 = ArgVec[1];
+    llvm::Value *Arg_x2 = ArgVec[2];
+    llvm::Value *Arg_instep = ArgVec[3];
+    llvm::Value *Arg_outstep = ArgVec[4];
+
+    Arg_p->setName("p");
+    Arg_x1->setName("x1");
+    Arg_x2->setName("x2");
+    Arg_instep->setName("instep");
+    Arg_outstep->setName("outstep");
+
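+    // Conceptually, the generated body is:
+    //   for (uint32_t X = x1; X < x2; X++) {
+    //     <NAME>(In, Out, UsrData, X, Y);  // only the params selected by Signature
+    //     In += instep; Out += outstep;    // byte-stepping of the data pointers
+    //   }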
+    // Construct the actual function body.
+    llvm::BasicBlock *Begin =
+        llvm::BasicBlock::Create(*C, "Begin", ExpandedFunc);
+    llvm::IRBuilder<> Builder(Begin);
+
+    // uint32_t X = x1;
+    llvm::AllocaInst *AX = Builder.CreateAlloca(Int32Ty, 0, "AX");
+    Builder.CreateStore(Arg_x1, AX);
+
+    // Collect and construct the arguments for the kernel().
+    // Note that we load any loop-invariant arguments before entering the Loop.
+    llvm::Function::arg_iterator Args = F->arg_begin();
+
+    llvm::Type *InTy = NULL;
+    llvm::AllocaInst *AIn = NULL;
+    if (hasIn(Signature)) {
+      InTy = Args->getType();
+      AIn = Builder.CreateAlloca(InTy, 0, "AIn");
+      Builder.CreateStore(Builder.CreatePointerCast(Builder.CreateLoad(
+          Builder.CreateStructGEP(Arg_p, 0)), InTy), AIn);
+      Args++;
+    }
+
+    llvm::Type *OutTy = NULL;
+    llvm::AllocaInst *AOut = NULL;
+    if (hasOut(Signature)) {
+      OutTy = Args->getType();
+      AOut = Builder.CreateAlloca(OutTy, 0, "AOut");
+      Builder.CreateStore(Builder.CreatePointerCast(Builder.CreateLoad(
+          Builder.CreateStructGEP(Arg_p, 1)), OutTy), AOut);
+      Args++;
+    }
+
+    llvm::Value *UsrData = NULL;
+    if (hasUsrData(Signature)) {
+      llvm::Type *UsrDataTy = Args->getType();
+      UsrData = Builder.CreatePointerCast(Builder.CreateLoad(
+          Builder.CreateStructGEP(Arg_p, 2)), UsrDataTy);
+      UsrData->setName("UsrData");
+      Args++;
+    }
+
+    if (hasX(Signature)) {
+      Args++;
+    }
+
+    llvm::Value *Y = NULL;
+    if (hasY(Signature)) {
+      Y = Builder.CreateLoad(Builder.CreateStructGEP(Arg_p, 5), "Y");
+      Args++;
+    }
+
+#if !RS_FOREACH_EXPAND_PASS_NDEBUG
+    if (Args != F->arg_end()) {
+      ALOGE("Assert failed at %s:%d: Invalid signature to the foreach function "
+            "'%s'!", __FILE__, __LINE__, F->getName().str().c_str());
+      ::abort();
+    }
+#endif
+
+    llvm::BasicBlock *Loop = llvm::BasicBlock::Create(*C, "Loop", ExpandedFunc);
+    llvm::BasicBlock *Exit = llvm::BasicBlock::Create(*C, "Exit", ExpandedFunc);
+
+    // if (x1 < x2) goto Loop; else goto Exit;
+    llvm::Value *Cond = Builder.CreateICmpSLT(Arg_x1, Arg_x2);
+    Builder.CreateCondBr(Cond, Loop, Exit);
+
+    // Loop:
+    Builder.SetInsertPoint(Loop);
+
+    // Populate the actual call to kernel().
+    llvm::SmallVector<llvm::Value*, 8> RootArgs;
+
+    llvm::Value *In = NULL;
+    llvm::Value *Out = NULL;
+
+    if (AIn) {
+      In = Builder.CreateLoad(AIn, "In");
+      RootArgs.push_back(In);
+    }
+
+    if (AOut) {
+      Out = Builder.CreateLoad(AOut, "Out");
+      RootArgs.push_back(Out);
+    }
+
+    if (UsrData) {
+      RootArgs.push_back(UsrData);
+    }
+
+    // We always have to load X, since it is used to iterate through the loop.
+    llvm::Value *X = Builder.CreateLoad(AX, "X");
+    if (hasX(Signature)) {
+      RootArgs.push_back(X);
+    }
+
+    if (Y) {
+      RootArgs.push_back(Y);
+    }
+
+    Builder.CreateCall(F, RootArgs);
+
+    if (In) {
+      // In += instep
+      llvm::Value *NewIn = Builder.CreateIntToPtr(Builder.CreateNUWAdd(
+          Builder.CreatePtrToInt(In, Int32Ty), Arg_instep), InTy);
+      Builder.CreateStore(NewIn, AIn);
+    }
+
+    if (Out) {
+      // Out += outstep
+      llvm::Value *NewOut = Builder.CreateIntToPtr(Builder.CreateNUWAdd(
+          Builder.CreatePtrToInt(Out, Int32Ty), Arg_outstep), OutTy);
+      Builder.CreateStore(NewOut, AOut);
+    }
+
+    // X++;
+    llvm::Value *XPlusOne =
+        Builder.CreateNUWAdd(X, llvm::ConstantInt::get(Int32Ty, 1));
+    Builder.CreateStore(XPlusOne, AX);
+
+    // If (X < x2) goto Loop; else goto Exit;
+    Cond = Builder.CreateICmpSLT(XPlusOne, Arg_x2);
+    Builder.CreateCondBr(Cond, Loop, Exit);
+
+    // Exit:
+    Builder.SetInsertPoint(Exit);
+    Builder.CreateRetVoid();
+
+    return true;
+  }
+
+  virtual bool runOnModule(llvm::Module &M) {
+    bool Changed = false;
+    this->M = &M;
+    C = &M.getContext();
+
+    for (RSInfo::ExportForeachFuncListTy::const_iterator
+             func_iter = mFuncs.begin(), func_end = mFuncs.end();
+         func_iter != func_end; func_iter++) {
+      const char *name = func_iter->first;
+      uint32_t signature = func_iter->second;
+      llvm::Function *kernel = M.getFunction(name);
+      if (kernel && kernel->getReturnType()->isVoidTy()) {
+        Changed |= ExpandFunction(kernel, signature);
+      }
+    }
+
+    return Changed;
+  }
+
+  virtual const char *getPassName() const {
+    return "ForEach-able Function Expansion";
+  }
+
+}; // end RSForEachExpandPass
+
+} // end anonymous namespace
+
+char RSForEachExpandPass::ID = 0;
+
+namespace bcc {
+
+llvm::ModulePass *
+createRSForEachExpandPass(const RSInfo::ExportForeachFuncListTy &pForeachFuncs){
+  return new RSForEachExpandPass(pForeachFuncs);
+}
+
+} // end namespace bcc
diff --git a/lib/Renderscript/RSInfo.cpp b/lib/Renderscript/RSInfo.cpp
new file mode 100644
index 0000000..dbf8657
--- /dev/null
+++ b/lib/Renderscript/RSInfo.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#include "bcc/Renderscript/RSInfo.h"
+
+#include <dlfcn.h>
+
+#include <cstring>
+#include <new>
+
+#include "bcc/Support/FileBase.h"
+#include "bcc/Support/Log.h"
+
+#include <cutils/properties.h>
+
+using namespace bcc;
+
+const char RSInfo::LibBCCPath[] = "/system/lib/libbcc.so";
+const char RSInfo::LibRSPath[] = "/system/lib/libRS.so";
+const char RSInfo::LibCLCorePath[] = "/system/lib/libclcore.bc";
+#if defined(ARCH_ARM_HAVE_NEON)
+const char RSInfo::LibCLCoreNEONPath[] = "/system/lib/libclcore_neon.bc";
+#endif
+
+const uint8_t *RSInfo::LibBCCSHA1 = NULL;
+const uint8_t *RSInfo::LibRSSHA1 = NULL;
+const uint8_t *RSInfo::LibCLCoreSHA1 = NULL;
+#if defined(ARCH_ARM_HAVE_NEON)
+const uint8_t *RSInfo::LibCLCoreNEONSHA1 = NULL;
+#endif
+
+void RSInfo::LoadBuiltInSHA1Information() {
+  if (LibBCCSHA1 != NULL) {
+    // Loaded before.
+    return;
+  }
+
+  void *h = ::dlopen("/system/lib/libbcc.sha1.so", RTLD_LAZY | RTLD_NOW);
+  if (h == NULL) {
+    ALOGE("Failed to load SHA-1 information from shared library '"
+          "/system/lib/libbcc.sha1.so'! (%s)", ::dlerror());
+    return;
+  }
+
+  LibBCCSHA1 = reinterpret_cast<const uint8_t *>(::dlsym(h, "libbcc_so_SHA1"));
+  LibRSSHA1 = reinterpret_cast<const uint8_t *>(::dlsym(h, "libRS_so_SHA1"));
+  LibCLCoreSHA1 =
+      reinterpret_cast<const uint8_t *>(::dlsym(h, "libclcore_bc_SHA1"));
+#if defined(ARCH_ARM_HAVE_NEON)
+  LibCLCoreNEONSHA1 =
+      reinterpret_cast<const uint8_t *>(::dlsym(h, "libclcore_neon_bc_SHA1"));
+#endif
+
+  return;
+}
+
+android::String8 RSInfo::GetPath(const FileBase &pFile) {
+  android::String8 result(pFile.getName().c_str());
+  result.append(".info");
+  return result;
+}
+
+#define PRINT_DEPENDENCY(PREFIX, N, X) \
+        ALOGV("\t" PREFIX "Source name: %s, "                                 \
+                          "SHA-1: %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"   \
+                                 "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",  \
+              (N), (X)[ 0], (X)[ 1], (X)[ 2], (X)[ 3], (X)[ 4], (X)[ 5],      \
+                   (X)[ 6], (X)[ 7], (X)[ 8], (X)[ 9], (X)[10], (X)[11],      \
+                   (X)[12], (X)[13], (X)[14], (X)[15], (X)[16], (X)[17],      \
+                   (X)[18], (X)[19]);
+
+bool RSInfo::CheckDependency(const RSInfo &pInfo,
+                             const char *pInputFilename,
+                             const DependencyTableTy &pDeps) {
+  // Built-in dependencies are libbcc.so, libRS.so and libclcore.bc plus
+  // libclcore_neon.bc if NEON is available on the target device.
+#if !defined(ARCH_ARM_HAVE_NEON)
+  static const unsigned NumBuiltInDependencies = 3;
+#else
+  static const unsigned NumBuiltInDependencies = 4;
+#endif
+
+  LoadBuiltInSHA1Information();
+
+  if (pInfo.mDependencyTable.size() != (pDeps.size() + NumBuiltInDependencies)) {
+    ALOGD("Number of dependencies recorded mismatch (%lu v.s. %lu) in %s!",
+          static_cast<unsigned long>(pInfo.mDependencyTable.size()),
+          static_cast<unsigned long>(pDeps.size()), pInputFilename);
+    return false;
+  } else {
+    // Built-in dependencies always go first.
+    const std::pair<const char *, const uint8_t *> &cache_libbcc_dep =
+        pInfo.mDependencyTable[0];
+    const std::pair<const char *, const uint8_t *> &cache_libRS_dep =
+        pInfo.mDependencyTable[1];
+    const std::pair<const char *, const uint8_t *> &cache_libclcore_dep =
+        pInfo.mDependencyTable[2];
+#if defined(ARCH_ARM_HAVE_NEON)
+    const std::pair<const char *, const uint8_t *> &cache_libclcore_neon_dep =
+        pInfo.mDependencyTable[3];
+#endif
+
+    // Check libbcc.so.
+    if (::memcmp(cache_libbcc_dep.second, LibBCCSHA1, SHA1_DIGEST_LENGTH) != 0) {
+        ALOGD("Cache %s is dirty due to %s has been updated.", pInputFilename,
+              LibBCCPath);
+        PRINT_DEPENDENCY("current - ", LibBCCPath, LibBCCSHA1);
+        PRINT_DEPENDENCY("cache - ", cache_libbcc_dep.first,
+                                     cache_libbcc_dep.second);
+        return false;
+    }
+
+    // Check libRS.so.
+    if (::memcmp(cache_libRS_dep.second, LibRSSHA1, SHA1_DIGEST_LENGTH) != 0) {
+        ALOGD("Cache %s is dirty due to %s has been updated.", pInputFilename,
+              LibRSPath);
+        PRINT_DEPENDENCY("current - ", LibRSPath, LibRSSHA1);
+        PRINT_DEPENDENCY("cache - ", cache_libRS_dep.first,
+                                     cache_libRS_dep.second);
+        return false;
+    }
+
+    // Check libclcore.bc.
+    if (::memcmp(cache_libclcore_dep.second, LibCLCoreSHA1,
+                 SHA1_DIGEST_LENGTH) != 0) {
+        ALOGD("Cache %s is dirty due to %s has been updated.", pInputFilename,
+              LibRSPath);
+        PRINT_DEPENDENCY("current - ", LibCLCorePath, LibCLCoreSHA1);
+        PRINT_DEPENDENCY("cache - ", cache_libclcore_dep.first,
+                                     cache_libclcore_dep.second);
+        return false;
+    }
+
+#if defined(ARCH_ARM_HAVE_NEON)
+    // Check libclcore_neon.bc if NEON is available.
+    if (::memcmp(cache_libclcore_neon_dep.second, LibCLCoreNEONSHA1,
+                 SHA1_DIGEST_LENGTH) != 0) {
+        ALOGD("Cache %s is dirty due to %s has been updated.", pInputFilename,
+              LibRSPath);
+        PRINT_DEPENDENCY("current - ", LibCLCoreNEONPath, LibCLCoreNEONSHA1);
+        PRINT_DEPENDENCY("cache - ", cache_libclcore_neon_dep.first,
+                                     cache_libclcore_neon_dep.second);
+        return false;
+    }
+#endif
+
+    for (unsigned i = 0; i < pDeps.size(); i++) {
+      const std::pair<const char *, const uint8_t *> &cache_dep =
+          pInfo.mDependencyTable[i + NumBuiltInDependencies];
+
+      if ((::strcmp(pDeps[i].first, cache_dep.first) != 0) ||
+          (::memcmp(pDeps[i].second, cache_dep.second,
+                    SHA1_DIGEST_LENGTH) != 0)) {
+        ALOGD("Cache %s is dirty due to the source it dependends on has been "
+              "changed:", pInputFilename);
+        PRINT_DEPENDENCY("given - ", pDeps[i].first, pDeps[i].second);
+        PRINT_DEPENDENCY("cache - ", cache_dep.first, cache_dep.second);
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+RSInfo::RSInfo(size_t pStringPoolSize) : mStringPool(NULL) {
+  ::memset(&mHeader, 0, sizeof(mHeader));
+
+  ::memcpy(mHeader.magic, RSINFO_MAGIC, sizeof(mHeader.magic));
+  ::memcpy(mHeader.version, RSINFO_VERSION, sizeof(mHeader.version));
+
+  mHeader.headerSize = sizeof(mHeader);
+
+  mHeader.dependencyTable.itemSize = sizeof(rsinfo::DependencyTableItem);
+  mHeader.pragmaList.itemSize = sizeof(rsinfo::PragmaItem);
+  mHeader.objectSlotList.itemSize = sizeof(rsinfo::ObjectSlotItem);
+  mHeader.exportVarNameList.itemSize = sizeof(rsinfo::ExportVarNameItem);
+  mHeader.exportFuncNameList.itemSize = sizeof(rsinfo::ExportFuncNameItem);
+  mHeader.exportForeachFuncList.itemSize = sizeof(rsinfo::ExportForeachFuncItem);
+
+  if (pStringPoolSize > 0) {
+    mHeader.strPoolSize = pStringPoolSize;
+    mStringPool = new (std::nothrow) char [ mHeader.strPoolSize ];
+    if (mStringPool == NULL) {
+      ALOGE("Out of memory when allocate memory for string pool in RSInfo "
+            "constructor (size: %u)!", mHeader.strPoolSize);
+    }
+  }
+}
+
+RSInfo::~RSInfo() {
+  delete [] mStringPool;
+}
+
+bool RSInfo::layout(off_t initial_offset) {
+  mHeader.dependencyTable.offset = initial_offset +
+                                   mHeader.headerSize +
+                                   mHeader.strPoolSize;
+  mHeader.dependencyTable.count = mDependencyTable.size();
+
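+  // Each list is laid out back-to-back after the header and the string pool;
+  // AFTER(_list) computes the file offset immediately following that list.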
+#define AFTER(_list) ((_list).offset + (_list).itemSize * (_list).count)
+  mHeader.pragmaList.offset = AFTER(mHeader.dependencyTable);
+  mHeader.pragmaList.count = mPragmas.size();
+
+  mHeader.objectSlotList.offset = AFTER(mHeader.pragmaList);
+  mHeader.objectSlotList.count = mObjectSlots.size();
+
+  mHeader.exportVarNameList.offset = AFTER(mHeader.objectSlotList);
+  mHeader.exportVarNameList.count = mExportVarNames.size();
+
+  mHeader.exportFuncNameList.offset = AFTER(mHeader.exportVarNameList);
+  mHeader.exportFuncNameList.count = mExportFuncNames.size();
+
+  mHeader.exportForeachFuncList.offset = AFTER(mHeader.exportFuncNameList);
+  mHeader.exportForeachFuncList.count = mExportForeachFuncs.size();
+#undef AFTER
+
+  return true;
+}
+
+void RSInfo::dump() const {
+  // Omit this code to save code size when debugging is disabled.
+#if !LOG_NDEBUG
+
+  // Dump header
+  ALOGV("RSInfo Header:");
+  ALOGV("\tIs threadable: %s", ((mHeader.isThreadable) ? "true" : "false"));
+  ALOGV("\tHeader size: %u", mHeader.headerSize);
+  ALOGV("\tString pool size: %u", mHeader.strPoolSize);
+
+#define DUMP_LIST_HEADER(_name, _header) do { \
+  ALOGV(_name ":"); \
+  ALOGV("\toffset: %u", (_header).offset);  \
+  ALOGV("\t# of item: %u", (_header).count);  \
+  ALOGV("\tsize of each item: %u", (_header).itemSize); \
+} while (false)
+  DUMP_LIST_HEADER("Dependency table", mHeader.dependencyTable);
+  for (DependencyTableTy::const_iterator dep_iter = mDependencyTable.begin(),
+          dep_end = mDependencyTable.end(); dep_iter != dep_end; dep_iter++) {
+    PRINT_DEPENDENCY("", dep_iter->first, dep_iter->second);
+  }
+
+  DUMP_LIST_HEADER("Pragma list", mHeader.pragmaList);
+  for (PragmaListTy::const_iterator pragma_iter = mPragmas.begin(),
+        pragma_end = mPragmas.end(); pragma_iter != pragma_end; pragma_iter++) {
+    ALOGV("\tkey: %s, value: %s", pragma_iter->first, pragma_iter->second);
+  }
+
+  DUMP_LIST_HEADER("RS object slots", mHeader.objectSlotList);
+  for (ObjectSlotListTy::const_iterator slot_iter = mObjectSlots.begin(),
+          slot_end = mObjectSlots.end(); slot_iter != slot_end; slot_iter++) {
+    ALOGV("slot: %u", *slot_iter);
+  }
+
+  DUMP_LIST_HEADER("RS export variables", mHeader.exportVarNameList);
+  for (ExportVarNameListTy::const_iterator var_iter = mExportVarNames.begin(),
+          var_end = mExportVarNames.end(); var_iter != var_end; var_iter++) {
+    ALOGV("name: %s", *var_iter);
+  }
+
+  DUMP_LIST_HEADER("RS export functions", mHeader.exportFuncNameList);
+  for (ExportFuncNameListTy::const_iterator func_iter = mExportFuncNames.begin(),
+        func_end = mExportFuncNames.end(); func_iter != func_end; func_iter++) {
+    ALOGV("name: %s", *func_iter);
+  }
+
+  DUMP_LIST_HEADER("RS foreach list", mHeader.exportForeachFuncList);
+  for (ExportForeachFuncListTy::const_iterator
+          foreach_iter = mExportForeachFuncs.begin(),
+          foreach_end = mExportForeachFuncs.end(); foreach_iter != foreach_end;
+          foreach_iter++) {
+    ALOGV("name: %s, signature: %05x", foreach_iter->first,
+                                       foreach_iter->second);
+  }
+#undef DUMP_LIST_HEADER
+
+#endif // LOG_NDEBUG
+  return;
+}
+
+const char *RSInfo::getStringFromPool(rsinfo::StringIndexTy pStrIdx) const {
+  // The string pool uses direct indexing. Ensure that pStrIdx is within
+  // range.
+  if (pStrIdx >= mHeader.strPoolSize) {
+    ALOGE("String index #%u is out of range in string pool (size: %u)!",
+          pStrIdx, mHeader.strPoolSize);
+    return NULL;
+  }
+  return &mStringPool[ pStrIdx ];
+}
+
+rsinfo::StringIndexTy RSInfo::getStringIdxInPool(const char *pStr) const {
+  // Assume we are on a flat memory architecture (i.e., the memory space is
+  // contiguous).
+  if ((mStringPool + mHeader.strPoolSize) < pStr) {
+    ALOGE("String %s does not in the string pool!", pStr);
+    return rsinfo::gInvalidStringIndex;
+  }
+  return (pStr - mStringPool);
+}
+
+RSInfo::FloatPrecision RSInfo::getFloatPrecisionRequirement() const {
+  // Check to see if we have any FP precision-related pragmas.
+  static const char relaxed_pragma[] = "rs_fp_relaxed";
+  static const char imprecise_pragma[] = "rs_fp_imprecise";
+  static const char full_pragma[] = "rs_fp_full";
+  bool relaxed_pragma_seen = false;
+  RSInfo::FloatPrecision result = FP_Full;
+
+  for (PragmaListTy::const_iterator pragma_iter = mPragmas.begin(),
+           pragma_end = mPragmas.end(); pragma_iter != pragma_end;
+       pragma_iter++) {
+    const char *pragma_key = pragma_iter->first;
+    if (::strcmp(pragma_key, relaxed_pragma) == 0) {
+      relaxed_pragma_seen = true;
+    } else if (::strcmp(pragma_key, imprecise_pragma) == 0) {
+      if (relaxed_pragma_seen) {
+        ALOGW("Multiple float precision pragmas specified!");
+      }
+      // rs_fp_imprecise takes precedence over any other precision pragma.
+      result = FP_Imprecise;
+    }
+  }
+
+  // Imprecise is selected over Relaxed precision.
+  // In the absence of both, result keeps the default Full precision.
+  if (relaxed_pragma_seen && (result != FP_Imprecise)) {
+    result = FP_Relaxed;
+  }
+
+  // Provide an override for precision via adb shell setprop:
+  // adb shell setprop debug.rs.precision rs_fp_full
+  // adb shell setprop debug.rs.precision rs_fp_relaxed
+  // adb shell setprop debug.rs.precision rs_fp_imprecise
+  char precision_prop_buf[PROPERTY_VALUE_MAX];
+  property_get("debug.rs.precision", precision_prop_buf, "");
+
+  if (precision_prop_buf[0]) {
+    if (::strcmp(precision_prop_buf, relaxed_pragma) == 0) {
+      ALOGI("Switching to RS FP relaxed mode via setprop");
+      result = FP_Relaxed;
+    } else if (::strcmp(precision_prop_buf, imprecise_pragma) == 0) {
+      ALOGI("Switching to RS FP imprecise mode via setprop");
+      result = FP_Imprecise;
+    } else if (::strcmp(precision_prop_buf, full_pragma) == 0) {
+      ALOGI("Switching to RS FP full mode via setprop");
+      result = FP_Full;
+    }
+  }
+
+  return result;
+}
diff --git a/lib/Renderscript/RSInfoExtractor.cpp b/lib/Renderscript/RSInfoExtractor.cpp
new file mode 100644
index 0000000..3392982
--- /dev/null
+++ b/lib/Renderscript/RSInfoExtractor.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//===----------------------------------------------------------------------===//
+// This file implements RSInfo::ExtractFromSource()
+//===----------------------------------------------------------------------===//
+#include "bcc/Renderscript/RSInfo.h"
+
+#include <llvm/Constants.h>
+#include <llvm/Metadata.h>
+#include <llvm/Module.h>
+
+#include "bcc/Source.h"
+#include "bcc/Support/Log.h"
+
+using namespace bcc;
+
+namespace {
+
+// Name of metadata node where pragma info resides (should be synced with
+// slang.cpp)
+const llvm::StringRef pragma_metadata_name("#pragma");
+
+/*
+ * The following names should be synced with the one appeared in
+ * slang_rs_metadata.h.
+ */
+// Name of metadata node where exported variable names reside
+const llvm::StringRef export_var_metadata_name("#rs_export_var");
+
+// Name of metadata node where exported function names reside
+const llvm::StringRef export_func_metadata_name("#rs_export_func");
+
+// Name of metadata node where exported ForEach name information resides
+const llvm::StringRef export_foreach_name_metadata_name("#rs_export_foreach_name");
+
+// Name of metadata node where exported ForEach signature information resides
+const llvm::StringRef export_foreach_metadata_name("#rs_export_foreach");
+
+// Name of metadata node where RS object slot info resides (should be synced
+// with slang_rs_metadata.h)
+const llvm::StringRef object_slot_metadata_name("#rs_object_slots");
+
+inline llvm::StringRef getStringFromOperand(const llvm::Value *pString) {
+  if ((pString != NULL) && (pString->getValueID() == llvm::Value::MDStringVal)) {
+    return static_cast<const llvm::MDString *>(pString)->getString();
+  }
+  return llvm::StringRef();
+}
+
+template<size_t NumOperands>
+inline size_t getMetadataStringLength(const llvm::NamedMDNode *pMetadata) {
+  if (pMetadata == NULL) {
+    return 0;
+  }
+
+  size_t string_size = 0;
+  for (unsigned i = 0, e = pMetadata->getNumOperands(); i < e; i++) {
+    llvm::MDNode *node = pMetadata->getOperand(i);
+    if ((node != NULL) && (node->getNumOperands() >= NumOperands)) {
+      // The compiler tries its best to unroll this loop since NumOperands is
+      // a template parameter; the number of iterations is therefore known at
+      // compile time and is usually small.
+      for (unsigned j = 0; j < NumOperands; j++) {
+        llvm::StringRef s = getStringFromOperand(node->getOperand(j));
+        if (s.size() > 0) {
+          // +1 is for the null-terminator at the end of string.
+          string_size += (s.size() + 1);
+        }
+      }
+    }
+  }
+
+  return string_size;
+}
+
+// Write a string pString to the string pool pStringPool at offset pWriteStart.
+// Return a pointer to where pString resides within the string pool.
+const char *writeString(const llvm::StringRef &pString, char *pStringPool,
+                        off_t *pWriteStart) {
+  if (pString.empty()) {
+    return pStringPool;
+  }
+
+  char *pStringWriteStart = pStringPool + *pWriteStart;
+  // Copy the string.
+  ::memcpy(pStringWriteStart, pString.data(), pString.size());
+  // Write null-terminator at the end of the string.
+  pStringWriteStart[ pString.size() ] = '\0';
+  // Update pWriteStart.
+  *pWriteStart += (pString.size() + 1);
+
+  return pStringWriteStart;
+}
+
+bool writeDependency(const std::string &pSourceName, const uint8_t *pSHA1,
+                     char *pStringPool, off_t *pWriteStart,
+                     RSInfo::DependencyTableTy &pDepTable) {
+  const char *source_name = writeString(pSourceName, pStringPool, pWriteStart);
+
+  uint8_t *sha1 = reinterpret_cast<uint8_t *>(pStringPool + *pWriteStart);
+
+  // SHA-1 is special: it is SHA1_DIGEST_LENGTH (= 20) bytes long and is not
+  // null-terminated.
+  ::memcpy(sha1, pSHA1, SHA1_DIGEST_LENGTH);
+  // Record in the result RSInfo object.
+  pDepTable.push(std::make_pair(source_name, sha1));
+  // Update the string pool pointer.
+  *pWriteStart += SHA1_DIGEST_LENGTH;
+
+  return true;
+}
+
+} // end anonymous namespace
+
+RSInfo *RSInfo::ExtractFromSource(const Source &pSource,
+                                  const DependencyTableTy &pDeps)
+{
+  const llvm::Module &module = pSource.getModule();
+  const char *module_name = module.getModuleIdentifier().c_str();
+
+  const llvm::NamedMDNode *pragma =
+      module.getNamedMetadata(pragma_metadata_name);
+  const llvm::NamedMDNode *export_var =
+      module.getNamedMetadata(export_var_metadata_name);
+  const llvm::NamedMDNode *export_func =
+      module.getNamedMetadata(export_func_metadata_name);
+  const llvm::NamedMDNode *export_foreach_name =
+      module.getNamedMetadata(export_foreach_name_metadata_name);
+  const llvm::NamedMDNode *export_foreach_signature =
+      module.getNamedMetadata(export_foreach_metadata_name);
+  const llvm::NamedMDNode *object_slots =
+      module.getNamedMetadata(object_slot_metadata_name);
+
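+  // The extraction runs in two passes: first compute the total string pool
+  // size (below), then allocate the RSInfo object and copy every string and
+  // SHA-1 digest into the pool.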
+  // Always write a byte 0x0 at the beginning of the string pool.
+  size_t string_pool_size = 1;
+  off_t cur_string_pool_offset = 0;
+
+  RSInfo *result = NULL;
+
+  // Handle legacy case for pre-ICS bitcode that doesn't contain a metadata
+  // section for ForEach. We generate a full signature for a "root" function.
+  if ((export_foreach_name == NULL) || (export_foreach_signature == NULL)) {
+    export_foreach_name = NULL;
+    export_foreach_signature = NULL;
+    string_pool_size += 5;  // insert "root\0" for #rs_export_foreach_name
+  }
+
+  string_pool_size += getMetadataStringLength<2>(pragma);
+  string_pool_size += getMetadataStringLength<1>(export_var);
+  string_pool_size += getMetadataStringLength<1>(export_func);
+  string_pool_size += getMetadataStringLength<1>(export_foreach_name);
+
+  // Don't forget to reserve space in the string pool for the dependency
+  // information.
+  string_pool_size += ::strlen(LibBCCPath) + 1 + SHA1_DIGEST_LENGTH;
+  string_pool_size += ::strlen(LibRSPath) + 1 + SHA1_DIGEST_LENGTH;
+  string_pool_size += ::strlen(LibCLCorePath) + 1 + SHA1_DIGEST_LENGTH;
+#if defined(ARCH_ARM_HAVE_NEON)
+  string_pool_size += ::strlen(LibCLCoreNEONPath) + 1 + SHA1_DIGEST_LENGTH;
+#endif
+  for (unsigned i = 0, e = pDeps.size(); i != e; i++) {
+    // +1 for null-terminator
+    string_pool_size += ::strlen(/* name */pDeps[i].first) + 1;
+    // +SHA1_DIGEST_LENGTH for SHA-1 checksum
+    string_pool_size += SHA1_DIGEST_LENGTH;
+  }
+
+  // Allocate result object
+  result = new (std::nothrow) RSInfo(string_pool_size);
+  if (result == NULL) {
+    ALOGE("Out of memory when create RSInfo object for %s!", module_name);
+    goto bail;
+  }
+
+  // Check string pool.
+  if (result->mStringPool == NULL) {
+    ALOGE("Out of memory when allocate string pool in RSInfo object for %s!",
+          module_name);
+    goto bail;
+  }
+
+  // First byte of string pool should be an empty string
+  result->mStringPool[ cur_string_pool_offset++ ] = '\0';
+
+  // Populate all the strings and data.
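+  // FOR_EACH_NODE_IN iterates over every non-NULL MDNode operand of a named
+  // metadata node, binding the operand to _node for the statement that
+  // follows.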
+#define FOR_EACH_NODE_IN(_metadata, _node)  \
+  for (unsigned i = 0, e = (_metadata)->getNumOperands(); i != e; i++)  \
+    if (((_node) = (_metadata)->getOperand(i)) != NULL)
+  //===--------------------------------------------------------------------===//
+  // #pragma
+  //===--------------------------------------------------------------------===//
+  // Pragma is actually a key-value pair. The value can be an empty string while
+  // the key cannot.
+  if (pragma != NULL) {
+    llvm::MDNode *node;
+    FOR_EACH_NODE_IN(pragma, node) {
+        llvm::StringRef key = getStringFromOperand(node->getOperand(0));
+        llvm::StringRef val = getStringFromOperand(node->getOperand(1));
+        if (key.empty()) {
+          ALOGW("%s contains pragma metadata with empty key (skip)!",
+                module_name);
+        } else {
+          result->mPragmas.push(std::make_pair(
+              writeString(key, result->mStringPool, &cur_string_pool_offset),
+              writeString(val, result->mStringPool, &cur_string_pool_offset)));
+        } // key.empty()
+    } // FOR_EACH_NODE_IN
+  } // pragma != NULL
+
+  //===--------------------------------------------------------------------===//
+  // #rs_export_var
+  //===--------------------------------------------------------------------===//
+  if (export_var != NULL) {
+    llvm::MDNode *node;
+    FOR_EACH_NODE_IN(export_var, node) {
+      llvm::StringRef name = getStringFromOperand(node->getOperand(0));
+      if (name.empty()) {
+        ALOGW("%s contains empty entry in #rs_export_var metadata (skip)!",
+              module_name);
+      } else {
+          result->mExportVarNames.push(
+              writeString(name, result->mStringPool, &cur_string_pool_offset));
+      }
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // #rs_export_func
+  //===--------------------------------------------------------------------===//
+  if (export_func != NULL) {
+    llvm::MDNode *node;
+    FOR_EACH_NODE_IN(export_func, node) {
+      llvm::StringRef name = getStringFromOperand(node->getOperand(0));
+      if (name.empty()) {
+        ALOGW("%s contains empty entry in #rs_export_func metadata (skip)!",
+              module_name);
+      } else {
+        result->mExportFuncNames.push(
+            writeString(name, result->mStringPool, &cur_string_pool_offset));
+      }
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // #rs_export_foreach and #rs_export_foreach_name
+  //===--------------------------------------------------------------------===//
+  // Dealing with #rs_export_foreach (the signature of a foreach-able
+  // function) and #rs_export_foreach_name (the name of a foreach-able
+  // function) is a little bit complicated, because we have to maintain a
+  // legacy case:
+  //
+  //  In pre-ICS bitcode, the forEach feature only supported a non-graphics
+  //  root() function, and only the signature corresponding to that root()
+  //  was written to the #rs_export_foreach metadata section. There was no
+  //  #rs_export_foreach_name metadata section.
+  //
+  // Currently, not only the non-graphics root() but also any other exportable
+  // function can be foreach-able. Therefore, a new metadata section,
+  // #rs_export_foreach_name, specifies which functions are foreach-able, and
+  // the entries in #rs_export_foreach_name (the function names) and
+  // #rs_export_foreach (the signatures) map one-to-one.
+  if ((export_foreach_name != NULL) && (export_foreach_signature != NULL)) {
+    unsigned num_foreach_function;
+
+    // Should be one-to-one mapping.
+    if (export_foreach_name->getNumOperands() !=
+        export_foreach_signature->getNumOperands()) {
+      ALOGE("Mismatch number of foreach-able function names (%u) in "
+            "#rs_export_foreach_name and number of signatures (%u) "
+            "in %s!", export_foreach_name->getNumOperands(),
+            export_foreach_signature->getNumOperands(), module_name);
+      goto bail;
+    }
+
+    num_foreach_function = export_foreach_name->getNumOperands();
+    for (unsigned i = 0; i < num_foreach_function; i++) {
+      llvm::MDNode *name_node = export_foreach_name->getOperand(i);
+      llvm::MDNode *signature_node = export_foreach_signature->getOperand(i);
+
+      llvm::StringRef name, signature_string;
+      if (name_node != NULL) {
+        name = getStringFromOperand(name_node->getOperand(0));
+      }
+      if (signature_node != NULL) {
+        signature_string = getStringFromOperand(signature_node->getOperand(0));
+      }
+
+      if (!name.empty() && !signature_string.empty()) {
+        // Both name_node and signature_node are not NULL nodes.
+        uint32_t signature;
+        if (signature_string.getAsInteger(10, signature)) {
+          ALOGE("Non-integer signature value '%s' for function %s found in %s!",
+                signature_string.str().c_str(), name.str().c_str(), module_name);
+          goto bail;
+        }
+        result->mExportForeachFuncs.push(std::make_pair(
+              writeString(name, result->mStringPool, &cur_string_pool_offset),
+              signature));
+      } else {
+        // One or both of the name and signature value are empty. It's safe only
+        // if both of them are empty.
+        if (name.empty() && signature_string.empty()) {
+          ALOGW("Entries #%u at #rs_export_foreach_name and #rs_export_foreach"
+                " are both NULL in %s! (skip)", i, module_name);
+          continue;
+        } else {
+          ALOGE("Entries #%u at %s is NULL in %s! (skip)", i,
+                (name.empty() ? "#rs_export_foreach_name" :
+                                "#rs_export_foreach"), module_name);
+          goto bail;
+        }
+      }
+    } // end for
+  } else {
+    // To handle the legacy case, we generate a full signature for a "root"
+    // function which means that we need to set the bottom 5 bits (0x1f) in the
+    // mask.
+    result->mExportForeachFuncs.push(std::make_pair(
+          writeString(llvm::StringRef("root"), result->mStringPool,
+                      &cur_string_pool_offset), 0x1f));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // #rs_object_slots
+  //===--------------------------------------------------------------------===//
+  if (object_slots != NULL) {
+    llvm::MDNode *node;
+    FOR_EACH_NODE_IN(object_slots, node) {
+      llvm::StringRef val = getStringFromOperand(node->getOperand(0));
+      if (val.empty()) {
+        ALOGW("%s contains empty entry in #rs_object_slots (skip)!",
+              module.getModuleIdentifier().c_str());
+      } else {
+        uint32_t slot;
+        if (val.getAsInteger(10, slot)) {
+          ALOGE("Non-integer object slot value '%s' in %s!", val.str().c_str(),
+                module.getModuleIdentifier().c_str());
+          goto bail;
+        }
+        result->mObjectSlots.push(slot);
+      }
+    }
+  }
+#undef FOR_EACH_NODE_IN
+
+  //===--------------------------------------------------------------------===//
+  // Record built-in dependency information.
+  //===--------------------------------------------------------------------===//
+  LoadBuiltInSHA1Information();
+
+  if (!writeDependency(LibBCCPath, LibBCCSHA1,
+                       result->mStringPool, &cur_string_pool_offset,
+                       result->mDependencyTable)) {
+    goto bail;
+  }
+
+  if (!writeDependency(LibRSPath, LibRSSHA1,
+                       result->mStringPool, &cur_string_pool_offset,
+                       result->mDependencyTable)) {
+    goto bail;
+  }
+
+  if (!writeDependency(LibCLCorePath, LibCLCoreSHA1,
+                       result->mStringPool, &cur_string_pool_offset,
+                       result->mDependencyTable)) {
+    goto bail;
+  }
+
+#if defined(ARCH_ARM_HAVE_NEON)
+  if (!writeDependency(LibCLCoreNEONPath, LibCLCoreNEONSHA1,
+                       result->mStringPool, &cur_string_pool_offset,
+                       result->mDependencyTable)) {
+    goto bail;
+  }
+#endif
+
+  //===--------------------------------------------------------------------===//
+  // Record dependency information.
+  //===--------------------------------------------------------------------===//
+  for (unsigned i = 0, e = pDeps.size(); i != e; i++) {
+    if (!writeDependency(/* name */pDeps[i].first, /* SHA-1 */pDeps[i].second,
+                         result->mStringPool, &cur_string_pool_offset,
+                         result->mDependencyTable)) {
+      goto bail;
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Determine whether the bitcode contains debug information
+  //===--------------------------------------------------------------------===//
+  // The root context of the debug information in the bitcode is put under
+  // the metadata named "llvm.dbg.cu".
+  result->mHeader.hasDebugInformation =
+      static_cast<uint8_t>(module.getNamedMetadata("llvm.dbg.cu") != NULL);
+
+  assert((cur_string_pool_offset == string_pool_size) &&
+            "Unexpected string pool size!");
+
+  return result;
+
+bail:
+  delete result;
+  return NULL;
+}
diff --git a/lib/Renderscript/RSInfoReader.cpp b/lib/Renderscript/RSInfoReader.cpp
new file mode 100644
index 0000000..fe5626a
--- /dev/null
+++ b/lib/Renderscript/RSInfoReader.cpp
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//===----------------------------------------------------------------------===//
+// This file implements RSInfo::ReadFromFile()
+//===----------------------------------------------------------------------===//
+
+#include "bcc/Renderscript/RSInfo.h"
+
+#include <new>
+
+#include <utils/FileMap.h>
+
+#include "bcc/Support/Log.h"
+#include "bcc/Support/InputFile.h"
+
+using namespace bcc;
+
+namespace {
+
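+// helper_read_list_item is declared generically and fully specialized below
+// for each on-disk item type. Each specialization converts one item read from
+// the file into its in-memory form and appends it to pResult.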
+template<typename ItemType, typename ItemContainer>
+inline bool helper_read_list_item(const ItemType &pItem,
+                                  const RSInfo &pInfo,
+                                  ItemContainer &pResult);
+
+// Process DependencyTableItem in the file
+template<> inline bool
+helper_read_list_item<rsinfo::DependencyTableItem, RSInfo::DependencyTableTy>(
+    const rsinfo::DependencyTableItem &pItem,
+    const RSInfo &pInfo,
+    RSInfo::DependencyTableTy &pResult)
+{
+  const char *id = pInfo.getStringFromPool(pItem.id);
+  const uint8_t *sha1 =
+      reinterpret_cast<const uint8_t *>(pInfo.getStringFromPool(pItem.sha1));
+
+  if (id == NULL) {
+    ALOGE("Invalid string index %d for source id in RS dependenct table.",
+          pItem.id);
+    return false;
+  }
+
+  if (sha1 == NULL) {
+    ALOGE("Invalid string index %d for SHA-1 checksum in RS dependenct table.",
+          pItem.id);
+    return false;
+  }
+
+  pResult.push(std::make_pair(id, sha1));
+  return true;
+}
+
+// Process PragmaItem in the file
+template<> inline bool
+helper_read_list_item<rsinfo::PragmaItem, RSInfo::PragmaListTy>(
+    const rsinfo::PragmaItem &pItem,
+    const RSInfo &pInfo,
+    RSInfo::PragmaListTy &pResult)
+{
+  const char *key = pInfo.getStringFromPool(pItem.key);
+  const char *value = pInfo.getStringFromPool(pItem.value);
+
+  if (key == NULL) {
+    ALOGE("Invalid string index %d for key in RS pragma list.", pItem.key);
+    return false;
+  }
+
+  if (value == NULL) {
+    ALOGE("Invalid string index %d for value in RS pragma list.", pItem.value);
+    return false;
+  }
+
+  pResult.push(std::make_pair(key, value));
+  return true;
+}
+
+// Process ObjectSlotItem in the file
+template<> inline bool
+helper_read_list_item<rsinfo::ObjectSlotItem, RSInfo::ObjectSlotListTy>(
+    const rsinfo::ObjectSlotItem &pItem,
+    const RSInfo &pInfo,
+    RSInfo::ObjectSlotListTy &pResult)
+{
+  pResult.push(pItem.slot);
+  return true;
+}
+
+// Process ExportVarNameItem in the file
+template<> inline bool
+helper_read_list_item<rsinfo::ExportVarNameItem, RSInfo::ExportVarNameListTy>(
+    const rsinfo::ExportVarNameItem &pItem,
+    const RSInfo &pInfo,
+    RSInfo::ExportVarNameListTy &pResult)
+{
+  const char *name = pInfo.getStringFromPool(pItem.name);
+
+  if (name == NULL) {
+    ALOGE("Invalid string index %d for name in RS export vars.", pItem.name);
+    return false;
+  }
+
+  pResult.push(name);
+  return true;
+}
+
+// Process ExportFuncNameItem in the file
+template<> inline bool
+helper_read_list_item<rsinfo::ExportFuncNameItem, RSInfo::ExportFuncNameListTy>(
+    const rsinfo::ExportFuncNameItem &pItem,
+    const RSInfo &pInfo,
+    RSInfo::ExportFuncNameListTy &pResult)
+{
+  const char *name = pInfo.getStringFromPool(pItem.name);
+
+  if (name == NULL) {
+    ALOGE("Invalid string index %d for name in RS export funcs.", pItem.name);
+    return false;
+  }
+
+  pResult.push(name);
+  return true;
+}
+
+// Process ExportForeachFuncItem in the file
+template<> inline bool
+helper_read_list_item<rsinfo::ExportForeachFuncItem, RSInfo::ExportForeachFuncListTy>(
+    const rsinfo::ExportForeachFuncItem &pItem,
+    const RSInfo &pInfo,
+    RSInfo::ExportForeachFuncListTy &pResult)
+{
+  const char *name = pInfo.getStringFromPool(pItem.name);
+
+  if (name == NULL) {
+    ALOGE("Invalid string index %d for name in RS export foreachs.", pItem.name);
+    return false;
+  }
+
+  pResult.push(std::make_pair(name, pItem.signature));
+  return true;
+}
+
+template<typename ItemType, typename ItemContainer>
+inline bool helper_read_list(const uint8_t *pData,
+                             const RSInfo &pInfo,
+                             const rsinfo::ListHeader &pHeader,
+                             ItemContainer &pResult) {
+  const ItemType *item;
+
+  // The caller has already verified that the list data lies within the file.
+  for (uint32_t i = 0; i < pHeader.count; i++) {
+    item = reinterpret_cast<const ItemType *>(pData +
+                                              pHeader.offset +
+                                              i * pHeader.itemSize);
+    if (!helper_read_list_item<ItemType, ItemContainer>(*item, pInfo, pResult)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+} // end anonymous namespace
+
+RSInfo *RSInfo::ReadFromFile(InputFile &pInput, const DependencyTableTy &pDeps) {
+  android::FileMap *map = NULL;
+  RSInfo *result = NULL;
+  const uint8_t *data;
+  const rsinfo::Header *header;
+  size_t filesize;
+  const char *input_filename = pInput.getName().c_str();
+  const off_t cur_input_offset = pInput.tell();
+
+  if (pInput.hasError()) {
+    ALOGE("Invalid RS info file %s! (%s)", input_filename,
+                                           pInput.getErrorMessage().c_str());
+    goto bail;
+  }
+
+  filesize = pInput.getSize();
+  if (pInput.hasError()) {
+    ALOGE("Failed to get the size of RS info file %s! (%s)",
+          input_filename, pInput.getErrorMessage().c_str());
+    goto bail;
+  }
+
+  // Create memory map for the file.
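+  // The map starts at the current read offset so that the RS info data may
+  // follow other content in the same file.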
+  map = pInput.createMap(/* pOffset */cur_input_offset,
+                         /* pLength */filesize - cur_input_offset);
+  if (map == NULL) {
+    ALOGE("Failed to map RS info file %s to the memory! (%s)",
+          input_filename, pInput.getErrorMessage().c_str());
+    goto bail;
+  }
+
+  data = reinterpret_cast<const uint8_t *>(map->getDataPtr());
+
+  // Header starts at the beginning of the file.
+  header = reinterpret_cast<const rsinfo::Header *>(data);
+
+  // Check the magic.
+  if (::memcmp(header->magic, RSINFO_MAGIC, sizeof(header->magic)) != 0) {
+    ALOGV("Wrong magic found in the RS info file %s. Treat it as a dirty "
+          "cache.", input_filename);
+    goto bail;
+  }
+
+  // Check the version.
+  if (::memcmp(header->version,
+               RSINFO_VERSION,
+               sizeof(header->version)) != 0) {
+    ALOGV("Mismatched version in RS info file %s: (current) %s vs. (file) %s. "
+          "Treat it as a dirty cache.", input_filename, RSINFO_VERSION,
+          header->version);
+    goto bail;
+  }
+
+  // Check the size.
+  if ((header->headerSize != sizeof(rsinfo::Header)) ||
+      (header->dependencyTable.itemSize != sizeof(rsinfo::DependencyTableItem)) ||
+      (header->pragmaList.itemSize != sizeof(rsinfo::PragmaItem)) ||
+      (header->objectSlotList.itemSize != sizeof(rsinfo::ObjectSlotItem)) ||
+      (header->exportVarNameList.itemSize != sizeof(rsinfo::ExportVarNameItem)) ||
+      (header->exportFuncNameList.itemSize != sizeof(rsinfo::ExportFuncNameItem)) ||
+      (header->exportForeachFuncList.itemSize != sizeof(rsinfo::ExportForeachFuncItem))) {
+    ALOGW("Corrupted RS info file %s! (unexpected size found)", input_filename);
+    goto bail;
+  }
+
+  // Check the range.
+#define LIST_DATA_RANGE(_list_header) \
+  ((_list_header).offset + (_list_header).count * (_list_header).itemSize)
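+  // LIST_DATA_RANGE() computes the end offset of a list's data; every list
+  // (and the string pool) must end within the mapped file.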
+  if (((header->headerSize + header->strPoolSize) > filesize) ||
+      (LIST_DATA_RANGE(header->dependencyTable) > filesize) ||
+      (LIST_DATA_RANGE(header->pragmaList) > filesize) ||
+      (LIST_DATA_RANGE(header->objectSlotList) > filesize) ||
+      (LIST_DATA_RANGE(header->exportVarNameList) > filesize) ||
+      (LIST_DATA_RANGE(header->exportFuncNameList) > filesize) ||
+      (LIST_DATA_RANGE(header->exportForeachFuncList) > filesize)) {
+    ALOGW("Corrupted RS info file %s! (data out of the range)", input_filename);
+    goto bail;
+  }
+#undef LIST_DATA_RANGE
+
+  // File seems ok, create result RSInfo object.
+  result = new (std::nothrow) RSInfo(header->strPoolSize);
+  if (result == NULL) {
+    ALOGE("Out of memory when create RSInfo object for %s!", input_filename);
+    goto bail;
+  }
+
+  // Advise the kernel of our (sequential) access pattern.
+  map->advise(android::FileMap::SEQUENTIAL);
+
+  // Copy the header.
+  ::memcpy(&result->mHeader, header, sizeof(rsinfo::Header));
+
+  if (header->strPoolSize > 0) {
+    // Copy the string pool. The string pool is immediately after the header at
+    // the offset header->headerSize.
+    if (result->mStringPool == NULL) {
+      ALOGE("Out of memory when allocate string pool for RS info file %s!",
+            input_filename);
+      goto bail;
+    }
+    ::memcpy(result->mStringPool, data + result->mHeader.headerSize,
+             result->mHeader.strPoolSize);
+  }
+
+  // Populate all the data to the result object.
+  if (!helper_read_list<rsinfo::DependencyTableItem, DependencyTableTy>
+        (data, *result, header->dependencyTable, result->mDependencyTable)) {
+    goto bail;
+  }
+
+  // Check dependency to see whether the cache is dirty or not.
+  if (!CheckDependency(*result, pInput.getName().c_str(), pDeps)) {
+    goto bail;
+  }
+
+  if (!helper_read_list<rsinfo::PragmaItem, PragmaListTy>
+        (data, *result, header->pragmaList, result->mPragmas)) {
+    goto bail;
+  }
+
+  if (!helper_read_list<rsinfo::ObjectSlotItem, ObjectSlotListTy>
+        (data, *result, header->objectSlotList, result->mObjectSlots)) {
+    goto bail;
+  }
+
+  if (!helper_read_list<rsinfo::ExportVarNameItem, ExportVarNameListTy>
+        (data, *result, header->exportVarNameList, result->mExportVarNames)) {
+    goto bail;
+  }
+
+  if (!helper_read_list<rsinfo::ExportFuncNameItem, ExportFuncNameListTy>
+        (data, *result, header->exportFuncNameList, result->mExportFuncNames)) {
+    goto bail;
+  }
+
+  if (!helper_read_list<rsinfo::ExportForeachFuncItem, ExportForeachFuncListTy>
+        (data, *result, header->exportForeachFuncList, result->mExportForeachFuncs)) {
+    goto bail;
+  }
+
+  // Clean up.
+  map->release();
+
+  return result;
+
+bail:
+  if (map != NULL) {
+    map->release();
+  }
+
+  delete result;
+
+  return NULL;
+} // RSInfo::ReadFromFile
diff --git a/lib/Renderscript/RSInfoWriter.cpp b/lib/Renderscript/RSInfoWriter.cpp
new file mode 100644
index 0000000..0eee62c
--- /dev/null
+++ b/lib/Renderscript/RSInfoWriter.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//===----------------------------------------------------------------------===//
+// This file implements RSInfo::write()
+//===----------------------------------------------------------------------===//
+
+#include "bcc/Renderscript/RSInfo.h"
+
+#include "bcc/Support/Log.h"
+#include "bcc/Support/OutputFile.h"
+
+using namespace bcc;
+
+namespace {
+
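+// helper_adapt_list_item converts one in-memory list entry into its on-disk
+// representation, translating pooled string pointers back into string pool
+// indices. It is fully specialized below for each item type.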
+template<typename ItemType, typename ItemContainer> inline bool
+helper_adapt_list_item(ItemType &pResult, const RSInfo &pInfo,
+                       const typename ItemContainer::const_iterator &pItem);
+
+template<> inline bool
+helper_adapt_list_item<rsinfo::DependencyTableItem, RSInfo::DependencyTableTy>(
+    rsinfo::DependencyTableItem &pResult,
+    const RSInfo &pInfo,
+    const RSInfo::DependencyTableTy::const_iterator &pItem) {
+  pResult.id = pInfo.getStringIdxInPool(pItem->first);
+  pResult.sha1 =
+      pInfo.getStringIdxInPool(reinterpret_cast<const char *>(pItem->second));
+
+  if (pResult.id == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS dependency table contains invalid source id string '%s'.",
+          pItem->first);
+    return false;
+  }
+
+  if (pResult.sha1 == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS dependency table contains invalid SHA-1 checksum string in '%s'.",
+          pItem->first);
+    return false;
+  }
+
+  return true;
+}
+
+template<> inline bool
+helper_adapt_list_item<rsinfo::PragmaItem, RSInfo::PragmaListTy>(
+    rsinfo::PragmaItem &pResult,
+    const RSInfo &pInfo,
+    const RSInfo::PragmaListTy::const_iterator &pItem) {
+  pResult.key = pInfo.getStringIdxInPool(pItem->first);
+  pResult.value = pInfo.getStringIdxInPool(pItem->second);
+
+  if (pResult.key == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS pragma list contains invalid string '%s' for key.", pItem->first);
+    return false;
+  }
+
+  if (pResult.value == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS pragma list contains invalid string '%s' for value.",
+          pItem->second);
+    return false;
+  }
+
+  return true;
+}
+
+template<> inline bool
+helper_adapt_list_item<rsinfo::ObjectSlotItem, RSInfo::ObjectSlotListTy>(
+    rsinfo::ObjectSlotItem &pResult,
+    const RSInfo &pInfo,
+    const RSInfo::ObjectSlotListTy::const_iterator &pItem) {
+  pResult.slot = *pItem;
+  return true;
+}
+
+template<> inline bool
+helper_adapt_list_item<rsinfo::ExportVarNameItem, RSInfo::ExportVarNameListTy>(
+    rsinfo::ExportVarNameItem &pResult,
+    const RSInfo &pInfo,
+    const RSInfo::ExportVarNameListTy::const_iterator &pItem) {
+  pResult.name = pInfo.getStringIdxInPool(*pItem);
+
+  if (pResult.name == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS export vars contains invalid string '%s' for name.", *pItem);
+    return false;
+  }
+
+  return true;
+}
+
+template<> inline bool
+helper_adapt_list_item<rsinfo::ExportFuncNameItem,
+                       RSInfo::ExportFuncNameListTy>(
+    rsinfo::ExportFuncNameItem &pResult,
+    const RSInfo &pInfo,
+    const RSInfo::ExportFuncNameListTy::const_iterator &pItem) {
+  pResult.name = pInfo.getStringIdxInPool(*pItem);
+
+  if (pResult.name == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS export funcs contains invalid string '%s' for name.", *pItem);
+    return false;
+  }
+
+  return true;
+}
+
+template<> inline bool
+helper_adapt_list_item<rsinfo::ExportForeachFuncItem,
+                       RSInfo::ExportForeachFuncListTy>(
+    rsinfo::ExportForeachFuncItem &pResult,
+    const RSInfo &pInfo,
+    const RSInfo::ExportForeachFuncListTy::const_iterator &pItem) {
+  pResult.name = pInfo.getStringIdxInPool(pItem->first);
+  pResult.signature = pItem->second;
+
+  if (pResult.name == rsinfo::gInvalidStringIndex) {
+    ALOGE("RS export foreach contains invalid string '%s' for name.",
+          pItem->first);
+    return false;
+  }
+
+  return true;
+}
+
+template<typename ItemType, typename ItemContainer>
+inline bool helper_write_list(OutputFile &pOutput,
+                              const RSInfo &pInfo,
+                              const rsinfo::ListHeader &pHeader,
+                              ItemContainer &pList) {
+  ItemType item;
+
+  for (typename ItemContainer::const_iterator item_iter = pList.begin(),
+          item_end = pList.end(); item_iter != item_end; item_iter++) {
+    // Convert each entry in the pList to ItemType.
+    if (!helper_adapt_list_item<ItemType, ItemContainer>(item,
+                                                         pInfo,
+                                                         item_iter)) {
+      return false;
+    }
+    // And write out an item.
+    if (pOutput.write(&item, sizeof(item)) != sizeof(item)) {
+      ALOGE("Cannot write out item of %s for RSInfo file %s! (%s)",
+            rsinfo::GetItemTypeName<ItemType>(), pOutput.getName().c_str(),
+            pOutput.getErrorMessage().c_str());
+      return false;
+    }
+  }
+
+  return true;
+}
+
+} // end anonymous namespace
+
+bool RSInfo::write(OutputFile &pOutput) {
+  off_t initial_offset = pOutput.tell();
+  const char *output_filename = pOutput.getName().c_str();
+
+  if (pOutput.hasError()) {
+    ALOGE("Invalid RS info file %s for output! (%s)",
+          output_filename, pOutput.getErrorMessage().c_str());
+    return false;
+  }
+
+  // Layout.
+  if (!layout(initial_offset)) {
+    return false;
+  }
+
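+  // Write out each section in order: header, string pool, dependency table,
+  // pragma list, object slots, exported variable and function names, and
+  // exported foreach functions.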
+  // Write header.
+  if (pOutput.write(&mHeader, sizeof(mHeader)) != sizeof(mHeader)) {
+    ALOGE("Cannot write out the header for RSInfo file %s! (%s)",
+          output_filename, pOutput.getErrorMessage().c_str());
+    return false;
+  }
+
+  // Write string pool.
+  if (static_cast<size_t>(pOutput.write(mStringPool, mHeader.strPoolSize))
+          != mHeader.strPoolSize) {
+    ALOGE("Cannot write out the string pool for RSInfo file %s! (%s)",
+          output_filename, pOutput.getErrorMessage().c_str());
+    return false;
+  }
+
+  // Write dependencyTable.
+  if (!helper_write_list<rsinfo::DependencyTableItem, DependencyTableTy>
+        (pOutput, *this, mHeader.dependencyTable, mDependencyTable)) {
+    return false;
+  }
+
+  // Write pragmaList.
+  if (!helper_write_list<rsinfo::PragmaItem, PragmaListTy>
+        (pOutput, *this, mHeader.pragmaList, mPragmas)) {
+    return false;
+  }
+
+  // Write objectSlotList.
+  if (!helper_write_list<rsinfo::ObjectSlotItem, ObjectSlotListTy>
+        (pOutput, *this, mHeader.objectSlotList, mObjectSlots)) {
+    return false;
+  }
+
+  // Write exportVarNameList.
+  if (!helper_write_list<rsinfo::ExportVarNameItem, ExportVarNameListTy>
+        (pOutput, *this, mHeader.exportVarNameList, mExportVarNames)) {
+    return false;
+  }
+
+  // Write exportFuncNameList.
+  if (!helper_write_list<rsinfo::ExportFuncNameItem, ExportFuncNameListTy>
+        (pOutput, *this, mHeader.exportFuncNameList, mExportFuncNames)) {
+    return false;
+  }
+
+  // Write exportForeachFuncList.
+  if (!helper_write_list<rsinfo::ExportForeachFuncItem, ExportForeachFuncListTy>
+        (pOutput, *this, mHeader.exportForeachFuncList, mExportForeachFuncs)) {
+    return false;
+  }
+
+  return true;
+}
diff --git a/lib/Renderscript/RSScript.cpp b/lib/Renderscript/RSScript.cpp
new file mode 100644
index 0000000..75cfff9
--- /dev/null
+++ b/lib/Renderscript/RSScript.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bcc/Renderscript/RSScript.h"
+
+#include "bcc/Renderscript/RSInfo.h"
+#include "bcc/Source.h"
+#include "bcc/Support/Log.h"
+
+using namespace bcc;
+
+bool RSScript::LinkRuntime(RSScript &pScript) {
+  // Use the same context as the source in pScript.
+  BCCContext &context = pScript.getSource().getContext();
+  const char* core_lib = RSInfo::LibCLCorePath;
+
+  // NEON-capable devices can use an accelerated math library for all
+  // reduced precision scripts.
+#if defined(ARCH_ARM_HAVE_NEON)
+  const RSInfo* info = pScript.getInfo();
+  if ((info != NULL) &&
+      (info->getFloatPrecisionRequirement() != RSInfo::FP_Full)) {
+    core_lib = RSInfo::LibCLCoreNEONPath;
+  }
+#endif
+
+  Source *libclcore_source = Source::CreateFromFile(context, core_lib);
+  if (libclcore_source == NULL) {
+    ALOGE("Failed to load Renderscript library '%s' to link!", core_lib);
+    return false;
+  }
+
+  if (!pScript.getSource().merge(*libclcore_source,
+                                 /* pPreserveSource */false)) {
+    ALOGE("Failed to link Renderscript library '%s'!", core_lib);
+    delete libclcore_source;
+    return false;
+  }
+
+  return true;
+}
+
+RSScript::RSScript(Source &pSource)
+  : Script(pSource), mInfo(NULL), mCompilerVersion(0),
+    mOptimizationLevel(kOptLvl3) { }
+
+bool RSScript::doReset() {
+  mInfo = NULL;
+  mCompilerVersion = 0;
+  mOptimizationLevel = kOptLvl3;
+  return true;
+}
diff --git a/lib/Renderscript/runtime/Android.mk b/lib/Renderscript/runtime/Android.mk
new file mode 100644
index 0000000..4227875
--- /dev/null
+++ b/lib/Renderscript/runtime/Android.mk
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2011-2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+# C/LLVM-IR source files for the library
+clcore_base_files := \
+    rs_allocation.c \
+    rs_cl.c \
+    rs_core.c \
+    rs_element.c \
+    rs_mesh.c \
+    rs_program.c \
+    rs_sample.c \
+    rs_sampler.c \
+    convert.ll \
+    matrix.ll \
+    pixel_packing.ll \
+    math.ll
+
+clcore_files := \
+    $(clcore_base_files) \
+    arch/generic.c
+
+clcore_neon_files := \
+    $(clcore_base_files) \
+    arch/neon.ll
+
+ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
+  RS_VERSION := $(PLATFORM_SDK_VERSION)
+else
+  # Increment by 1 whenever this is not a final release build, since we want to
+  # be able to see the RS version number change during development.
+  # See build/core/version_defaults.mk for more information about this.
+  RS_VERSION := "(1 + $(PLATFORM_SDK_VERSION))"
+endif
+
+# Build the base version of the library
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_SRC_FILES := $(clcore_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+
+# Build a NEON-enabled version of the library (if possible)
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libclcore_neon.bc
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_SRC_FILES := $(clcore_neon_files)
+
+include $(LOCAL_PATH)/build_bc_lib.mk
+endif
diff --git a/lib/Renderscript/runtime/arch/generic.c b/lib/Renderscript/runtime/arch/generic.c
new file mode 100644
index 0000000..8f299e4
--- /dev/null
+++ b/lib/Renderscript/runtime/arch/generic.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "rs_types.rsh"
+
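+// Generic, portable C implementations of the Renderscript vector math
+// built-ins. The NEON build omits this file and supplies implementations from
+// arch/neon.ll instead (see runtime/Android.mk).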
+extern short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high);
+extern float4 __attribute__((overloadable)) clamp(float4 amount, float4 low, float4 high);
+extern uchar4 __attribute__((overloadable)) convert_uchar4(short4);
+
+
+/*
+ * CLAMP
+ */
+
+extern float __attribute__((overloadable)) clamp(float amount, float low, float high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
+
+extern float2 __attribute__((overloadable)) clamp(float2 amount, float2 low, float2 high) {
+    float2 r;
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) clamp(float3 amount, float3 low, float3 high) {
+    float3 r;
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) clamp(float4 amount, float4 low, float4 high) {
+    float4 r;
+    r.x = amount.x < low.x ? low.x : (amount.x > high.x ? high.x : amount.x);
+    r.y = amount.y < low.y ? low.y : (amount.y > high.y ? high.y : amount.y);
+    r.z = amount.z < low.z ? low.z : (amount.z > high.z ? high.z : amount.z);
+    r.w = amount.w < low.w ? low.w : (amount.w > high.w ? high.w : amount.w);
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) clamp(float2 amount, float low, float high) {
+    float2 r;
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) clamp(float3 amount, float low, float high) {
+    float3 r;
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) clamp(float4 amount, float low, float high) {
+    float4 r;
+    r.x = amount.x < low ? low : (amount.x > high ? high : amount.x);
+    r.y = amount.y < low ? low : (amount.y > high ? high : amount.y);
+    r.z = amount.z < low ? low : (amount.z > high ? high : amount.z);
+    r.w = amount.w < low ? low : (amount.w > high ? high : amount.w);
+    return r;
+}
+
+
+/*
+ * FMAX
+ */
+
+extern float __attribute__((overloadable)) fmax(float v1, float v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmax(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmax(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmax(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x > v2 ? v1.x : v2;
+    r.y = v1.y > v2 ? v1.y : v2;
+    r.z = v1.z > v2 ? v1.z : v2;
+    r.w = v1.w > v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * FMIN
+ */
+
+extern float __attribute__((overloadable)) fmin(float v1, float v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float2 v2) {
+    float2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float3 v2) {
+    float3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float4 v2) {
+    float4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float2 __attribute__((overloadable)) fmin(float2 v1, float v2) {
+    float2 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    return r;
+}
+
+extern float3 __attribute__((overloadable)) fmin(float3 v1, float v2) {
+    float3 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) fmin(float4 v1, float v2) {
+    float4 r;
+    r.x = v1.x < v2 ? v1.x : v2;
+    r.y = v1.y < v2 ? v1.y : v2;
+    r.z = v1.z < v2 ? v1.z : v2;
+    r.w = v1.w < v2 ? v1.w : v2;
+    return r;
+}
+
+
+/*
+ * MAX
+ */
+
+extern char __attribute__((overloadable)) max(char v1, char v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) max(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) max(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) max(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern short __attribute__((overloadable)) max(short v1, short v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) max(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) max(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) max(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int __attribute__((overloadable)) max(int v1, int v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) max(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) max(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) max(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) max(int64_t v1, int64_t v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) max(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) max(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) max(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) max(uchar v1, uchar v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) max(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) max(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) max(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) max(ushort v1, ushort v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) max(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) max(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) max(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) max(uint v1, uint v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) max(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) max(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) max(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) max(ulong v1, ulong v2) {
+    return v1 > v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) max(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) max(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) max(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x > v2.x ? v1.x : v2.x;
+    r.y = v1.y > v2.y ? v1.y : v2.y;
+    r.z = v1.z > v2.z ? v1.z : v2.z;
+    r.w = v1.w > v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) max(float v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float2 v2) {
+    return fmax(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) max(float2 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float3 v2) {
+    return fmax(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) max(float3 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float4 v2) {
+    return fmax(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) max(float4 v1, float v2) {
+    return fmax(v1, v2);
+}
+
+
+/*
+ * MIN
+ */
+
+extern int8_t __attribute__((overloadable)) min(int8_t v1, int8_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern char2 __attribute__((overloadable)) min(char2 v1, char2 v2) {
+    char2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern char3 __attribute__((overloadable)) min(char3 v1, char3 v2) {
+    char3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern char4 __attribute__((overloadable)) min(char4 v1, char4 v2) {
+    char4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int16_t __attribute__((overloadable)) min(int16_t v1, int16_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern short2 __attribute__((overloadable)) min(short2 v1, short2 v2) {
+    short2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern short3 __attribute__((overloadable)) min(short3 v1, short3 v2) {
+    short3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern short4 __attribute__((overloadable)) min(short4 v1, short4 v2) {
+    short4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int32_t __attribute__((overloadable)) min(int32_t v1, int32_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern int2 __attribute__((overloadable)) min(int2 v1, int2 v2) {
+    int2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern int3 __attribute__((overloadable)) min(int3 v1, int3 v2) {
+    int3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern int4 __attribute__((overloadable)) min(int4 v1, int4 v2) {
+    int4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern int64_t __attribute__((overloadable)) min(int64_t v1, int64_t v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern long2 __attribute__((overloadable)) min(long2 v1, long2 v2) {
+    long2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern long3 __attribute__((overloadable)) min(long3 v1, long3 v2) {
+    long3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern long4 __attribute__((overloadable)) min(long4 v1, long4 v2) {
+    long4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uchar __attribute__((overloadable)) min(uchar v1, uchar v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uchar2 __attribute__((overloadable)) min(uchar2 v1, uchar2 v2) {
+    uchar2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uchar3 __attribute__((overloadable)) min(uchar3 v1, uchar3 v2) {
+    uchar3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uchar4 __attribute__((overloadable)) min(uchar4 v1, uchar4 v2) {
+    uchar4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ushort __attribute__((overloadable)) min(ushort v1, ushort v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ushort2 __attribute__((overloadable)) min(ushort2 v1, ushort2 v2) {
+    ushort2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ushort3 __attribute__((overloadable)) min(ushort3 v1, ushort3 v2) {
+    ushort3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ushort4 __attribute__((overloadable)) min(ushort4 v1, ushort4 v2) {
+    ushort4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern uint __attribute__((overloadable)) min(uint v1, uint v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern uint2 __attribute__((overloadable)) min(uint2 v1, uint2 v2) {
+    uint2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern uint3 __attribute__((overloadable)) min(uint3 v1, uint3 v2) {
+    uint3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern uint4 __attribute__((overloadable)) min(uint4 v1, uint4 v2) {
+    uint4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern ulong __attribute__((overloadable)) min(ulong v1, ulong v2) {
+    return v1 < v2 ? v1 : v2;
+}
+
+extern ulong2 __attribute__((overloadable)) min(ulong2 v1, ulong2 v2) {
+    ulong2 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    return r;
+}
+
+extern ulong3 __attribute__((overloadable)) min(ulong3 v1, ulong3 v2) {
+    ulong3 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    return r;
+}
+
+extern ulong4 __attribute__((overloadable)) min(ulong4 v1, ulong4 v2) {
+    ulong4 r;
+    r.x = v1.x < v2.x ? v1.x : v2.x;
+    r.y = v1.y < v2.y ? v1.y : v2.y;
+    r.z = v1.z < v2.z ? v1.z : v2.z;
+    r.w = v1.w < v2.w ? v1.w : v2.w;
+    return r;
+}
+
+extern float __attribute__((overloadable)) min(float v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float2 v2) {
+    return fmin(v1, v2);
+}
+
+extern float2 __attribute__((overloadable)) min(float2 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float3 v2) {
+    return fmin(v1, v2);
+}
+
+extern float3 __attribute__((overloadable)) min(float3 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float4 v2) {
+    return fmin(v1, v2);
+}
+
+extern float4 __attribute__((overloadable)) min(float4 v1, float v2) {
+    return fmin(v1, v2);
+}
+
+
+/*
+ * YUV
+ */
+
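+/* Fixed-point BT.601 video-range YUV -> RGB: the integer coefficients are the
+ * usual float weights scaled by 256 (1.164*256 ~= 298, 1.596*256 ~= 409,
+ * 0.391*256 ~= 100, 0.813*256 ~= 208, 2.018*256 ~= 516); the +128 rounds and
+ * the final >> 8 removes the scaling before clamping to [0, 255]. */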
+extern uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v) {
+    short Y = ((short)y) - 16;
+    short U = ((short)u) - 128;
+    short V = ((short)v) - 128;
+
+    short4 p;
+    p.r = (Y * 298 + V * 409 + 128) >> 8;
+    p.g = (Y * 298 - U * 100 - V * 208 + 128) >> 8;
+    p.b = (Y * 298 + U * 516 + 128) >> 8;
+    p.a = 255;
+    p.r = rsClamp(p.r, (short)0, (short)255);
+    p.g = rsClamp(p.g, (short)0, (short)255);
+    p.b = rsClamp(p.b, (short)0, (short)255);
+
+    return convert_uchar4(p);
+}
+
+static float4 yuv_U_values = {0.f, -0.392f * 0.003921569f, +2.02f * 0.003921569f, 0.f};
+static float4 yuv_V_values = {1.603f * 0.003921569f, -0.815f * 0.003921569f, 0.f, 0.f};
+
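+/* Floating-point variant: 0.003921569f is 1/255, so the result is normalized
+ * to [0, 1]; the static U/V weight vectors above apply the per-channel chroma
+ * coefficients with a single multiply-add each before clamping. */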
+extern float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v) {
+    float4 color = (float)y * 0.003921569f;
+    float4 fU = ((float)u) - 128.f;
+    float4 fV = ((float)v) - 128.f;
+
+    color += fU * yuv_U_values;
+    color += fV * yuv_V_values;
+    color = clamp(color, 0.f, 1.f);
+    return color;
+}
+
diff --git a/lib/Renderscript/runtime/arch/neon.ll b/lib/Renderscript/runtime/arch/neon.ll
new file mode 100644
index 0000000..5e6c614
--- /dev/null
+++ b/lib/Renderscript/runtime/arch/neon.ll
@@ -0,0 +1,737 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
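+; Hand-written NEON versions of selected Renderscript runtime functions. The
+; symbol names are Itanium-mangled C++ signatures, e.g. _Z5clampDv4_fS_S_ is
+; clamp(float4, float4, float4) and _Z3maxDv4_jS_ is max(uint4, uint4).
+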
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;               INTRINSICS               ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                HELPERS                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
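+; The smear_* helpers broadcast a scalar into every lane of a vector (a splat),
+; letting the mixed scalar/vector overloads below reuse the vector intrinsics.
+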
+define internal <4 x float> @smear_4f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+define internal <2 x float> @smear_2f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <2 x float> undef, float %in, i32 0
+  %2 = insertelement <2 x float> %1, float %in, i32 1
+  ret <2 x float> %2
+}
+
+define internal <4 x i32> @smear_4i32(i32 %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x i32> undef, i32 %in, i32 0
+  %2 = insertelement <4 x i32> %1, i32 %in, i32 1
+  %3 = insertelement <4 x i32> %2, i32 %in, i32 2
+  %4 = insertelement <4 x i32> %3, i32 %in, i32 3
+  ret <4 x i32> %4
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                 CLAMP                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
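+; clamp(value, low, high) = vmax(vmin(value, high), low); the 3-element forms
+; are widened to 4 lanes with shufflevector, clamped, and narrowed back.
+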
+define <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %low, <4 x float> %high) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %value, <4 x float> %high) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %low) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <4 x float> @_Z5clampDv4_fff(<4 x float> %value, float %low, float %high) nounwind readonly {
+  %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
+  %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
+  %out = tail call <4 x float> @_Z5clampDv4_fS_S_(<4 x float> %value, <4 x float> %_low, <4 x float> %_high) nounwind readonly
+  ret <4 x float> %out
+}
+
+define <3 x float> @_Z5clampDv3_fS_S_(<3 x float> %value, <3 x float> %low, <3 x float> %high) nounwind readonly {
+  %_value = shufflevector <3 x float> %value, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_low = shufflevector <3 x float> %low, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = shufflevector <3 x float> %high, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
+  %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
+  %c = shufflevector <4 x float> %b, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <3 x float> @_Z5clampDv3_fff(<3 x float> %value, float %low, float %high) nounwind readonly {
+  %_value = shufflevector <3 x float> %value, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %_high = tail call <4 x float> @smear_4f(float %high) nounwind readnone
+  %_low = tail call <4 x float> @smear_4f(float %low) nounwind readnone
+  %a = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %_value, <4 x float> %_high) nounwind readnone
+  %b = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %a, <4 x float> %_low) nounwind readnone
+  %c = shufflevector <4 x float> %b, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z5clampDv2_fS_S_(<2 x float> %value, <2 x float> %low, <2 x float> %high) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %value, <2 x float> %high) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %1, <2 x float> %low) nounwind readnone
+  ret <2 x float> %2
+}
+
+define <2 x float> @_Z5clampDv2_fff(<2 x float> %value, float %low, float %high) nounwind readonly {
+  %_high = tail call <2 x float> @smear_2f(float %high) nounwind readnone
+  %_low = tail call <2 x float> @smear_2f(float %low) nounwind readnone
+  %a = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %value, <2 x float> %_high) nounwind readnone
+  %b = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %a, <2 x float> %_low) nounwind readnone
+  ret <2 x float> %b
+}
+
+
+define float @_Z5clampfff(float %value, float %low, float %high) nounwind readonly {
+  %_value = tail call <2 x float> @smear_2f(float %value) nounwind readnone
+  %_low = tail call <2 x float> @smear_2f(float %low) nounwind readnone
+  %_high = tail call <2 x float> @smear_2f(float %high) nounwind readnone
+  %a = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %_value, <2 x float> %_high) nounwind readnone
+  %b = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %a, <2 x float> %_low) nounwind readnone
+  %c = extractelement <2 x float> %b, i32 0
+  ret float %c
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FMAX                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
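+; fmax (and fmin below) map the vector overloads directly onto NEON vmax/vmin;
+; the scalar float overloads are a plain compare-and-select.
+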
+define <4 x float> @_Z4fmaxDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %v1, <4 x float> %v2) nounwind readnone
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z4fmaxDv4_ff(<4 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %v1, <4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z4fmaxDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %v2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <3 x float> @_Z4fmaxDv3_ff(<3 x float> %v1, float %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %c = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z4fmaxDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %v1, <2 x float> %v2) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z4fmaxDv2_ff(<2 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <2 x float> @smear_2f(float %v2) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %v1, <2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define float @_Z4fmaxff(float %v1, float %v2) nounwind readonly {
+  %1 = fcmp ogt float %v1, %v2
+  %2 = select i1 %1, float %v1, float %v2
+  ret float %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FMIN                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x float> @_Z4fminDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readonly {
+  %1 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %v1, <4 x float> %v2) nounwind readnone
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z4fminDv4_ff(<4 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %2 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %v1, <4 x float> %1) nounwind readnone
+  ret <4 x float> %2
+}
+
+define <3 x float> @_Z4fminDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x float> %v2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %4
+}
+
+define <3 x float> @_Z4fminDv3_ff(<3 x float> %v1, float %v2) nounwind readonly {
+  %1 = shufflevector <3 x float> %v1, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <4 x float> @smear_4f(float %v2) nounwind readnone
+  %3 = tail call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %1, <4 x float> %2) nounwind readnone
+  %c = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %c
+}
+
+define <2 x float> @_Z4fminDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readonly {
+  %1 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %v1, <2 x float> %v2) nounwind readnone
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z4fminDv2_ff(<2 x float> %v1, float %v2) nounwind readonly {
+  %1 = tail call <2 x float> @smear_2f(float %v2) nounwind readnone
+  %2 = tail call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %v1, <2 x float> %1) nounwind readnone
+  ret <2 x float> %2
+}
+
+define float @_Z4fminff(float %v1, float %v2) nounwind readnone {
+  %1 = fcmp olt float %v1, %v2
+  %2 = select i1 %1, float %v1, float %v2
+  ret float %2
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  MAX                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
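+; Integer max/min overloads: 8- and 16-bit vectors are sign- or zero-extended
+; to 32-bit lanes so the i32 NEON vmax/vmin intrinsics can be reused, then
+; truncated back to the original element width.
+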
+define signext i8 @_Z3maxcc(i8 signext %v1, i8 signext %v2) nounwind readnone {
+  %1 = icmp sgt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3maxDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = sext <2 x i8> %v1 to <2 x i32>
+  %2 = sext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3maxDv3_cS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = sext <3 x i8> %v1 to <3 x i32>
+  %2 = sext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3maxDv4_cS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = sext <4 x i8> %v1 to <4 x i32>
+  %2 = sext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define signext i16 @_Z3maxss(i16 signext %v1, i16 signext %v2) nounwind readnone {
+  %1 = icmp sgt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3maxDv2_sS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = sext <2 x i16> %v1 to <2 x i32>
+  %2 = sext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3maxDv3_sS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = sext <3 x i16> %v1 to <3 x i32>
+  %2 = sext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3maxDv4_sS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = sext <4 x i16> %v1 to <4 x i32>
+  %2 = sext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3maxii(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp sgt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3maxDv2_iS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3maxDv3_iS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3maxDv4_iS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3maxxx(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp sgt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define zeroext i8 @_Z3maxhh(i8 zeroext %v1, i8 zeroext %v2) nounwind readnone {
+  %1 = icmp ugt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3maxDv2_hS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = zext <2 x i8> %v1 to <2 x i32>
+  %2 = zext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3maxDv3_hS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = zext <3 x i8> %v1 to <3 x i32>
+  %2 = zext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3maxDv4_hS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = zext <4 x i8> %v1 to <4 x i32>
+  %2 = zext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define zeroext i16 @_Z3maxtt(i16 zeroext %v1, i16 zeroext %v2) nounwind readnone {
+  %1 = icmp ugt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3maxDv2_tS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = zext <2 x i16> %v1 to <2 x i32>
+  %2 = zext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3maxDv3_tS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = zext <3 x i16> %v1 to <3 x i32>
+  %2 = zext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3maxDv4_tS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = zext <4 x i16> %v1 to <4 x i32>
+  %2 = zext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3maxjj(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp ugt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3maxDv2_jS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3maxDv3_jS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3maxDv4_jS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3maxyy(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp ugt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define float @_Z3maxff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @_Z4fmaxff(float %v1, float %v2)
+  ret float %1
+}
+
+define <2 x float> @_Z3maxDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fmaxDv2_fS_(<2 x float> %v1, <2 x float> %v2)
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z3maxDv2_ff(<2 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fmaxDv2_ff(<2 x float> %v1, float %v2)
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z3maxDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fmaxDv3_fS_(<3 x float> %v1, <3 x float> %v2)
+  ret <3 x float> %1
+}
+
+define <3 x float> @_Z3maxDv3_ff(<3 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fmaxDv3_ff(<3 x float> %v1, float %v2)
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z3maxDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fmaxDv4_fS_(<4 x float> %v1, <4 x float> %v2)
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z3maxDv4_ff(<4 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fmaxDv4_ff(<4 x float> %v1, float %v2)
+  ret <4 x float> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  MIN                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define signext i8 @_Z3mincc(i8 signext %v1, i8 signext %v2) nounwind readnone {
+  %1 = icmp slt i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3minDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = sext <2 x i8> %v1 to <2 x i32>
+  %2 = sext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3minDv3_cS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = sext <3 x i8> %v1 to <3 x i32>
+  %2 = sext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3minDv4_cS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = sext <4 x i8> %v1 to <4 x i32>
+  %2 = sext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define signext i16 @_Z3minss(i16 signext %v1, i16 signext %v2) nounwind readnone {
+  %1 = icmp slt i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3minDv2_sS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = sext <2 x i16> %v1 to <2 x i32>
+  %2 = sext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3minDv3_sS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = sext <3 x i16> %v1 to <3 x i32>
+  %2 = sext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3minDv4_sS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = sext <4 x i16> %v1 to <4 x i32>
+  %2 = sext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3minii(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp slt i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3minDv2_iS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3minDv3_iS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3minDv4_iS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3minxx(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp slt i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define zeroext i8 @_Z3minhh(i8 zeroext %v1, i8 zeroext %v2) nounwind readnone {
+  %1 = icmp ult i8 %v1, %v2
+  %2 = select i1 %1, i8 %v1, i8 %v2
+  ret i8 %2
+}
+
+define <2 x i8> @_Z3minDv2_hS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
+  %1 = zext <2 x i8> %v1 to <2 x i32>
+  %2 = zext <2 x i8> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i8>
+  ret <2 x i8> %4
+}
+
+define <3 x i8> @_Z3minDv3_hS_(<3 x i8> %v1, <3 x i8> %v2) nounwind readnone {
+  %1 = zext <3 x i8> %v1 to <3 x i32>
+  %2 = zext <3 x i8> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i8>
+  ret <3 x i8> %7
+}
+
+define <4 x i8> @_Z3minDv4_hS_(<4 x i8> %v1, <4 x i8> %v2) nounwind readnone {
+  %1 = zext <4 x i8> %v1 to <4 x i32>
+  %2 = zext <4 x i8> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  ret <4 x i8> %4
+}
+
+define zeroext i16 @_Z3mintt(i16 zeroext %v1, i16 zeroext %v2) nounwind readnone {
+  %1 = icmp ult i16 %v1, %v2
+  %2 = select i1 %1, i16 %v1, i16 %v2
+  ret i16 %2
+}
+
+define <2 x i16> @_Z3minDv2_tS_(<2 x i16> %v1, <2 x i16> %v2) nounwind readnone {
+  %1 = zext <2 x i16> %v1 to <2 x i32>
+  %2 = zext <2 x i16> %v2 to <2 x i32>
+  %3 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %1, <2 x i32> %2) nounwind readnone
+  %4 = trunc <2 x i32> %3 to <2 x i16>
+  ret <2 x i16> %4
+}
+
+define <3 x i16> @_Z3minDv3_tS_(<3 x i16> %v1, <3 x i16> %v2) nounwind readnone {
+  %1 = zext <3 x i16> %v1 to <3 x i32>
+  %2 = zext <3 x i16> %v2 to <3 x i32>
+  %3 = shufflevector <3 x i32> %1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %4 = shufflevector <3 x i32> %2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %3, <4 x i32> %4) nounwind readnone
+  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  %7 = trunc <3 x i32> %6 to <3 x i16>
+  ret <3 x i16> %7
+}
+
+define <4 x i16> @_Z3minDv4_tS_(<4 x i16> %v1, <4 x i16> %v2) nounwind readnone {
+  %1 = zext <4 x i16> %v1 to <4 x i32>
+  %2 = zext <4 x i16> %v2 to <4 x i32>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = trunc <4 x i32> %3 to <4 x i16>
+  ret <4 x i16> %4
+}
+
+define i32 @_Z3minjj(i32 %v1, i32 %v2) nounwind readnone {
+  %1 = icmp ult i32 %v1, %v2
+  %2 = select i1 %1, i32 %v1, i32 %v2
+  ret i32 %2
+}
+
+define <2 x i32> @_Z3minDv2_jS_(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone {
+  %1 = tail call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %v1, <2 x i32> %v2) nounwind readnone
+  ret <2 x i32> %1
+}
+
+define <3 x i32> @_Z3minDv3_jS_(<3 x i32> %v1, <3 x i32> %v2) nounwind readnone {
+  %1 = shufflevector <3 x i32> %v1, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = shufflevector <3 x i32> %v2, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %1, <4 x i32> %2) nounwind readnone
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x i32> %4
+}
+
+define <4 x i32> @_Z3minDv4_jS_(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone {
+  %1 = tail call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %v1, <4 x i32> %v2) nounwind readnone
+  ret <4 x i32> %1
+}
+
+define i64 @_Z3minyy(i64 %v1, i64 %v2) nounwind readnone {
+  %1 = icmp ult i64 %v1, %v2
+  %2 = select i1 %1, i64 %v1, i64 %v2
+  ret i64 %2
+}
+
+; TODO:  long vector types
+
+define float @_Z3minff(float %v1, float %v2) nounwind readnone {
+  %1 = tail call float @_Z4fminff(float %v1, float %v2)
+  ret float %1
+}
+
+define <2 x float> @_Z3minDv2_fS_(<2 x float> %v1, <2 x float> %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fminDv2_fS_(<2 x float> %v1, <2 x float> %v2)
+  ret <2 x float> %1
+}
+
+define <2 x float> @_Z3minDv2_ff(<2 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <2 x float> @_Z4fminDv2_ff(<2 x float> %v1, float %v2)
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z3minDv3_fS_(<3 x float> %v1, <3 x float> %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fminDv3_fS_(<3 x float> %v1, <3 x float> %v2)
+  ret <3 x float> %1
+}
+
+define <3 x float> @_Z3minDv3_ff(<3 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <3 x float> @_Z4fminDv3_ff(<3 x float> %v1, float %v2)
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z3minDv4_fS_(<4 x float> %v1, <4 x float> %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fminDv4_fS_(<4 x float> %v1, <4 x float> %v2)
+  ret <4 x float> %1
+}
+
+define <4 x float> @_Z3minDv4_ff(<4 x float> %v1, float %v2) nounwind readnone {
+  %1 = tail call <4 x float> @_Z4fminDv4_ff(<4 x float> %v1, float %v2)
+  ret <4 x float> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  YUV                   ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+@yuv_U = internal constant <4 x i32> <i32 0, i32 -100, i32 516, i32 0>, align 16
+@yuv_V = internal constant <4 x i32> <i32 409, i32 -208, i32 0, i32 0>, align 16
+@yuv_0 = internal constant <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+@yuv_255 = internal constant <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>, align 16
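+; Same fixed-point scheme as the C rsYuvToRGBA_uchar4 above: yuv_U/yuv_V hold
+; the scaled chroma weights, and the clamp happens before the final >> 8, so
+; the limits are 0 and 65535 rather than 0 and 255.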
+
+
+define <4 x i8> @_Z18rsYuvToRGBA_uchar4hhh(i8 %pY, i8 %pU, i8 %pV) nounwind readnone alwaysinline {
+  %_sy = zext i8 %pY to i32
+  %_su = zext i8 %pU to i32
+  %_sv = zext i8 %pV to i32
+
+  %_sy2 = add i32 -16, %_sy
+  %_sy3 = mul i32 298, %_sy2
+  %_su2 = add i32 -128, %_su
+  %_sv2 = add i32 -128, %_sv
+  %_y = tail call <4 x i32> @smear_4i32(i32 %_sy3) nounwind readnone
+  %_u = tail call <4 x i32> @smear_4i32(i32 %_su2) nounwind readnone
+  %_v = tail call <4 x i32> @smear_4i32(i32 %_sv2) nounwind readnone
+
+  %mu = load <4 x i32>* @yuv_U, align 8
+  %mv = load <4 x i32>* @yuv_V, align 8
+  %_u2 = mul <4 x i32> %_u, %mu
+  %_v2 = mul <4 x i32> %_v, %mv
+  %_y2 = add <4 x i32> %_y, %_u2
+  %_y3 = add <4 x i32> %_y2, %_v2
+
+ ; %r1 = tail call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %_y3, <4 x i32> <i32 8, i32 8, i32 8, i32 8>) nounwind readnone
+;  %r2 = trunc <4 x i16> %r1 to <4 x i8>
+;  ret <4 x i8> %r2
+
+  %c0 = load <4 x i32>* @yuv_0, align 8
+  %c255 = load <4 x i32>* @yuv_255, align 8
+  %r1 = tail call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %_y3, <4 x i32> %c0) nounwind readnone
+  %r2 = tail call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %r1, <4 x i32> %c255) nounwind readnone
+  %r3 = lshr <4 x i32> %r2, <i32 8, i32 8, i32 8, i32 8>
+  %r4 = trunc <4 x i32> %r3 to <4 x i8>
+  ret <4 x i8> %r4
+}
+
diff --git a/lib/Renderscript/runtime/build_bc_lib.mk b/lib/Renderscript/runtime/build_bc_lib.mk
new file mode 100644
index 0000000..58f5f6e
--- /dev/null
+++ b/lib/Renderscript/runtime/build_bc_lib.mk
@@ -0,0 +1,65 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+# We need to pass the +long64 flag to the underlying version of Clang, since
+# we are generating a library for use with Renderscript (64-bit long type,
+# not 32-bit).
+bc_clang_cc1_cflags := -target-feature +long64
+bc_translated_clang_cc1_cflags := $(addprefix -Xclang , $(bc_clang_cc1_cflags))
+
+bc_cflags := -MD \
+             -DRS_VERSION=$(RS_VERSION) \
+             -std=c99 \
+             -c \
+             -O3 \
+             -fno-builtin \
+             -emit-llvm \
+             -ccc-host-triple armv7-none-linux-gnueabi \
+             -fsigned-char \
+             $(bc_translated_clang_cc1_cflags)
+
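+# Each .c source is compiled to LLVM bitcode with clang and each .ll source is
+# assembled with llvm-as; the per-file .bc outputs are then linked into the
+# final module with llvm-link (see the rules below).
+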
+c_sources := $(filter %.c,$(LOCAL_SRC_FILES))
+ll_sources := $(filter %.ll,$(LOCAL_SRC_FILES))
+
+c_bc_files := $(patsubst %.c,%.bc, \
+    $(addprefix $(intermediates)/, $(c_sources)))
+
+ll_bc_files := $(patsubst %.ll,%.bc, \
+    $(addprefix $(intermediates)/, $(ll_sources)))
+
+$(c_bc_files): PRIVATE_INCLUDES := \
+    frameworks/rs/scriptc \
+    external/clang/lib/Headers
+
+$(c_bc_files): $(intermediates)/%.bc: $(LOCAL_PATH)/%.c  $(CLANG)
+	@mkdir -p $(dir $@)
+	$(hide) $(CLANG) $(addprefix -I, $(PRIVATE_INCLUDES)) $(bc_cflags) $< -o $@
+
+$(ll_bc_files): $(intermediates)/%.bc: $(LOCAL_PATH)/%.ll $(LLVM_AS)
+	@mkdir -p $(dir $@)
+	$(hide) $(LLVM_AS) $< -o $@
+
+-include $(c_bc_files:%.bc=%.d)
+-include $(ll_bc_files:%.bc=%.d)
+
+$(LOCAL_BUILT_MODULE): PRIVATE_BC_FILES := $(c_bc_files) $(ll_bc_files)
+$(LOCAL_BUILT_MODULE): $(c_bc_files) $(ll_bc_files)
+$(LOCAL_BUILT_MODULE): $(LLVM_LINK) $(clcore_LLVM_LD)
+$(LOCAL_BUILT_MODULE): $(LLVM_AS)
+	@mkdir -p $(dir $@)
+	$(hide) $(LLVM_LINK) $(PRIVATE_BC_FILES) -o $@
diff --git a/lib/Renderscript/runtime/build_clcore.sh b/lib/Renderscript/runtime/build_clcore.sh
new file mode 100755
index 0000000..329831f
--- /dev/null
+++ b/lib/Renderscript/runtime/build_clcore.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Usually, manually running build_clcore.sh shouldn't be needed. build_clcore.mk should
+# kick in automatically during the Android build process.
+
+# Generate rs_cl.bc
+# =================
+
+scriptc_path=../../../../base/libs/rs/scriptc
+clang_header_path=../../../../../external/clang/lib/Headers
+
+clang -ccc-host-triple armv7-none-linux-gnueabi -I${scriptc_path} -I${clang_header_path} -c -std=c99 -O3 rs_cl.c -emit-llvm -o rs_cl.bc
+
+# Generate rs_core.bc
+# ===================
+
+clang -ccc-host-triple armv7-none-linux-gnueabi -I${scriptc_path} -I${clang_header_path} -c -std=c99 -O3 rs_core.c -emit-llvm -o rs_core.bc
+
+# Link everything together
+# ========================
+
+llvm-link rs_cl.bc rs_core.bc -o libclcore.bc
diff --git a/lib/Renderscript/runtime/convert.ll b/lib/Renderscript/runtime/convert.ll
new file mode 100644
index 0000000..f45850d
--- /dev/null
+++ b/lib/Renderscript/runtime/convert.ll
@@ -0,0 +1,731 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
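+; Vector convert_<type>N() overloads implemented as plain LLVM conversions
+; (sitofp/uitofp, fptosi/fptoui, sext/zext/trunc). These do not saturate;
+; out-of-range inputs simply follow the semantics of the underlying cast.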
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  FLOAT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <2 x float> @_Z14convert_float2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i8> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i8> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i8> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i8> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i8> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i8> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i16> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i16> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i16> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i16> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i16> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i16> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <2 x i32> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <3 x i32> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = uitofp <4 x i32> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <2 x i32> %in to <2 x float>
+  ret <2 x float> %1
+}
+
+define <3 x float> @_Z14convert_float3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <3 x i32> %in to <3 x float>
+  ret <3 x float> %1
+}
+
+define <4 x float> @_Z14convert_float4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = sitofp <4 x i32> %in to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x float> @_Z14convert_float2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  ret <2 x float> %in
+}
+
+define <3 x float> @_Z14convert_float3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  ret <3 x float> %in
+}
+
+define <4 x float> @_Z14convert_float4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  ret <4 x float> %in
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  CHAR                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+define <4 x i8> @_Z13convert_char4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z13convert_char4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z13convert_char3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z13convert_char2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  UCHAR                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  ret <4 x i8> %in
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  ret <3 x i8> %in
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  ret <2 x i8> %in
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i16> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i16> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i16> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+define <4 x i8> @_Z14convert_uchar4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i8>
+  ret <4 x i8> %1
+}
+
+define <3 x i8> @_Z14convert_uchar3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i8>
+  ret <3 x i8> %1
+}
+
+define <2 x i8> @_Z14convert_uchar2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i8>
+  ret <2 x i8> %1
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  SHORT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i16> @_Z14convert_short4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z14convert_short4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z14convert_short3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z14convert_short2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                 USHORT                 ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i16> @_Z15convert_ushort4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  ret <4 x i16> %in
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  ret <3 x i16> %in
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  ret <2 x i16> %in
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+define <4 x i16> @_Z15convert_ushort4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <4 x i32> %in to <4 x i16>
+  ret <4 x i16> %1
+}
+
+define <3 x i16> @_Z15convert_ushort3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <3 x i32> %in to <3 x i16>
+  ret <3 x i16> %1
+}
+
+define <2 x i16> @_Z15convert_ushort2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  %1 = trunc <2 x i32> %in to <2 x i16>
+  ret <2 x i16> %1
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                   INT                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i32> @_Z12convert_int4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <4 x float> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <3 x float> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptosi <2 x float> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = sext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+define <4 x i32> @_Z12convert_int4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z12convert_int3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z12convert_int2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;                  UINT                  ;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define <4 x i32> @_Z13convert_uint4Dv4_f(<4 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <4 x float> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_f(<3 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <3 x float> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_f(<2 x float> %in) nounwind readnone alwaysinline {
+  %1 = fptoui <2 x float> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_h(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_h(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_h(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_c(<4 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i8> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_c(<3 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i8> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_c(<2 x i8> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i8> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_t(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_t(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_t(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_s(<4 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <4 x i16> %in to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_s(<3 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <3 x i16> %in to <3 x i32>
+  ret <3 x i32> %1
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_s(<2 x i16> %in) nounwind readnone alwaysinline {
+  %1 = zext <2 x i16> %in to <2 x i32>
+  ret <2 x i32> %1
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_j(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_j(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_j(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
+
+define <4 x i32> @_Z13convert_uint4Dv4_i(<4 x i32> %in) nounwind readnone alwaysinline {
+  ret <4 x i32> %in
+}
+
+define <3 x i32> @_Z13convert_uint3Dv3_i(<3 x i32> %in) nounwind readnone alwaysinline {
+  ret <3 x i32> %in
+}
+
+define <2 x i32> @_Z13convert_uint2Dv2_i(<2 x i32> %in) nounwind readnone alwaysinline {
+  ret <2 x i32> %in
+}
diff --git a/lib/Renderscript/runtime/math.ll b/lib/Renderscript/runtime/math.ll
new file mode 100644
index 0000000..4ea2b10
--- /dev/null
+++ b/lib/Renderscript/runtime/math.ll
@@ -0,0 +1,16 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+declare float @llvm.sqrt.f32(float)
+declare float @llvm.pow.f32(float, float)
+
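+; Scalar sqrt(float) and pow(float, float) are thin wrappers over the
+; corresponding LLVM intrinsics declared above.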
+define float @_Z4sqrtf(float %v) {
+  %1 = tail call float @llvm.sqrt.f32(float %v)
+  ret float %1
+}
+
+define float @_Z3powf(float %v1, float %v2) {
+  %1 = tail call float @llvm.pow.f32(float  %v1, float %v2)
+  ret float %1
+}
+
diff --git a/lib/Renderscript/runtime/matrix.ll b/lib/Renderscript/runtime/matrix.ll
new file mode 100644
index 0000000..e559d99
--- /dev/null
+++ b/lib/Renderscript/runtime/matrix.ll
@@ -0,0 +1,176 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+
+%struct.rs_matrix4x4 = type { [16 x float] }
+%struct.rs_matrix3x3 = type { [9 x float] }
+%struct.rs_matrix2x2 = type { [4 x float] }
+
+define internal <4 x float> @smear_f(float %in) nounwind readnone alwaysinline {
+  %1 = insertelement <4 x float> undef, float %in, i32 0
+  %2 = insertelement <4 x float> %1, float %in, i32 1
+  %3 = insertelement <4 x float> %2, float %in, i32 2
+  %4 = insertelement <4 x float> %3, float %in, i32 3
+  ret <4 x float> %4
+}
+
+
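+; The multiplies below broadcast each input component with smear_f, multiply it
+; against one column of the (column-major) matrix loaded as a 4-float vector
+; (columns start at float offsets 0/3/6 for 3x3 and 0/4/8/12 for 4x4), and sum
+; the per-column products; any unused lane is dropped by the final shufflevector.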
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2
+  %pz = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 6
+  %pz2 = bitcast float* %pz to <3 x float>*
+  %zm2 = load <3 x float>* %pz2
+  %zm = shufflevector <3 x float> %zm2, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a4, %a3
+  %a6 = shufflevector <4 x float> %a5, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a6
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyP12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind readonly {
+  %r = tail call <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv3_f(%struct.rs_matrix3x3* nocapture %m, <3 x float> %in) nounwind
+  ret <3 x float> %r
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2
+  %py = getelementptr inbounds %struct.rs_matrix3x3* %m, i32 0, i32 0, i32 3
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = shufflevector <4 x float> %a3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+  ret <3 x float> %a4
+}
+
+define <3 x float> @_Z16rsMatrixMultiplyP12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind readonly {
+  %r = tail call <3 x float> @_Z16rsMatrixMultiplyPK12rs_matrix3x3Dv2_f(%struct.rs_matrix3x3* nocapture %m, <2 x float> %in) nounwind
+  ret <3 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %x0 = extractelement <4 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <4 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <4 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+  %w0 = extractelement <4 x float> %in, i32 3
+  %w = tail call <4 x float> @smear_f(float %w0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fmul <4 x float> %y, %ym
+  %a3 = fadd <4 x float> %a1, %a2
+  %a4 = fmul <4 x float> %z, %zm
+  %a5 = fadd <4 x float> %a3, %a4
+  %a6 = fmul <4 x float> %w, %wm
+  %a7 = fadd <4 x float> %a5, %a6
+  ret <4 x float> %a7
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv4_f(%struct.rs_matrix4x4* nocapture %m, <4 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
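+; The float3 and float2 variants of the 4x4 multiply treat the input as
+; (x, y, z, 1) and (x, y, 0, 1) respectively: the last matrix column (%wm,
+; floats 12..15) is added unscaled, and the z term is simply omitted for float2.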
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %x0 = extractelement <3 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <3 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+  %z0 = extractelement <3 x float> %in, i32 2
+  %z = tail call <4 x float> @smear_f(float %z0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2
+  %pz = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 8
+  %pz2 = bitcast float* %pz to <4 x float>*
+  %zm = load <4 x float>* %pz2
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  %a5 = fmul <4 x float> %z, %zm
+  %a6 = fadd <4 x float> %a4, %a5
+  ret <4 x float> %a6
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv3_f(%struct.rs_matrix4x4* nocapture %m, <3 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %x0 = extractelement <2 x float> %in, i32 0
+  %x = tail call <4 x float> @smear_f(float %x0) nounwind readnone
+  %y0 = extractelement <2 x float> %in, i32 1
+  %y = tail call <4 x float> @smear_f(float %y0) nounwind readnone
+
+  %px = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 0
+  %px2 = bitcast float* %px to <4 x float>*
+  %xm = load <4 x float>* %px2
+  %py = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 4
+  %py2 = bitcast float* %py to <4 x float>*
+  %ym = load <4 x float>* %py2
+  %pw = getelementptr inbounds %struct.rs_matrix4x4* %m, i32 0, i32 0, i32 12
+  %pw2 = bitcast float* %pw to <4 x float>*
+  %wm = load <4 x float>* %pw2
+
+  %a1 = fmul <4 x float> %x, %xm
+  %a2 = fadd <4 x float> %wm, %a1
+  %a3 = fmul <4 x float> %y, %ym
+  %a4 = fadd <4 x float> %a2, %a3
+  ret <4 x float> %a4
+}
+
+define <4 x float> @_Z16rsMatrixMultiplyP12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind readonly {
+  %r = tail call <4 x float> @_Z16rsMatrixMultiplyPK12rs_matrix4x4Dv2_f(%struct.rs_matrix4x4* nocapture %m, <2 x float> %in) nounwind
+  ret <4 x float> %r
+}
+
diff --git a/lib/Renderscript/runtime/pixel_packing.ll b/lib/Renderscript/runtime/pixel_packing.ll
new file mode 100644
index 0000000..65401a6
--- /dev/null
+++ b/lib/Renderscript/runtime/pixel_packing.ll
@@ -0,0 +1,47 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7-none-linux-gnueabi"
+
+@fc_255.0 = internal constant <4 x float> <float 255.0, float 255.0, float 255.0, float 255.0>, align 16
+@fc_0.5 = internal constant <4 x float> <float 0.5, float 0.5, float 0.5, float 0.5>, align 16
+
+declare <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %in) nounwind readnone
+declare <4 x float> @_Z14convert_float4Dv4_h(<4 x i8> %in) nounwind readnone
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
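+; Packing maps [0, 1] floats to 0..255: scale by 255, add 0.5, then let the
+; truncating float-to-uchar conversion round to nearest.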
+define <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %color) nounwind readnone {
+    %f255 = load <4 x float>* @fc_255.0, align 16
+    %f05 = load <4 x float>* @fc_0.5, align 16
+    %v1 = fmul <4 x float> %f255, %color
+    %v2 = fadd <4 x float> %f05, %v1
+    %v3 = tail call <4 x i8> @_Z14convert_uchar4Dv4_f(<4 x float> %v2) nounwind readnone
+    ret <4 x i8> %v3
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+define <4 x i8> @_Z17rsPackColorTo8888Dv3_f(<3 x float> %color) nounwind readnone {
+    %1 = shufflevector <3 x float> %color, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+    %2 = insertelement <4 x float> %1, float 1.0, i32 3
+    %3 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %2) nounwind readnone
+    ret <4 x i8> %3
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+define <4 x i8> @_Z17rsPackColorTo8888fff(float %r, float %g, float %b) nounwind readnone {
+    %1 = insertelement <4 x float> undef, float %r, i32 0
+    %2 = insertelement <4 x float> %1, float %g, i32 1
+    %3 = insertelement <4 x float> %2, float %b, i32 2
+    %4 = insertelement <4 x float> %3, float 1.0, i32 3
+    %5 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %4) nounwind readnone
+    ret <4 x i8> %5
+}
+
+; uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+define <4 x i8> @_Z17rsPackColorTo8888ffff(float %r, float %g, float %b, float %a) nounwind readnone {
+    %1 = insertelement <4 x float> undef, float %r, i32 0
+    %2 = insertelement <4 x float> %1, float %g, i32 1
+    %3 = insertelement <4 x float> %2, float %b, i32 2
+    %4 = insertelement <4 x float> %3, float %a, i32 3
+    %5 = tail call <4 x i8> @_Z17rsPackColorTo8888Dv4_f(<4 x float> %4) nounwind readnone
+    ret <4 x i8> %5
+}
+
diff --git a/lib/Renderscript/runtime/rs_allocation.c b/lib/Renderscript/runtime/rs_allocation.c
new file mode 100644
index 0000000..dbfb76e
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_allocation.c
@@ -0,0 +1,73 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+// Opaque Allocation type operations
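+// Each accessor casts the opaque rs_allocation handle (a.p) to an Allocation_t
+// and reads the requested dimension, pointer, or element out of its mHal state.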
+extern uint32_t __attribute__((overloadable))
+    rsAllocationGetDimX(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.dimensionX;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimY(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.dimensionY;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimZ(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.dimensionZ;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimLOD(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.hasMipmaps;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsAllocationGetDimFaces(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    return alloc->mHal.state.hasFaces;
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    return &p[eSize * x];
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.stride;
+    return &p[(eSize * x) + (y * stride)];
+}
+
+extern const void * __attribute__((overloadable))
+        rsGetElementAt(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.mallocPtr;
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t stride = alloc->mHal.drvState.stride;
+    const uint32_t dimY = alloc->mHal.state.dimensionY;
+    return &p[(eSize * x) + (y * stride) + (z * stride * dimY)];
+}
+
+extern rs_element __attribute__((overloadable))
+        rsAllocationGetElement(rs_allocation a) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    if (alloc == NULL) {
+        rs_element nullElem = {0};
+        return nullElem;
+    }
+    Type_t *type = (Type_t *)alloc->mHal.state.type;
+    rs_element returnElem = {type->mHal.state.element};
+    return returnElem;
+}
diff --git a/lib/Renderscript/runtime/rs_cl.c b/lib/Renderscript/runtime/rs_cl.c
new file mode 100644
index 0000000..677a89a
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_cl.c
@@ -0,0 +1,898 @@
+#include "rs_types.rsh"
+
+// Float ops, 6.11.2
+
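+// Overload-generator macros. The name encodes the signature: FN = floatN
+// vector, F = scalar float, IN = intN vector, I = scalar int, and PFN/PIN =
+// pointer to a floatN/intN out-parameter. Each macro defines component-wise
+// 2/3/4-element overloads in terms of the existing scalar function.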
+#define FN_FUNC_FN(fnc)                                         \
+extern float2 __attribute__((overloadable)) fnc(float2 v) { \
+    float2 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern float3 __attribute__((overloadable)) fnc(float3 v) { \
+    float3 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern float4 __attribute__((overloadable)) fnc(float4 v) { \
+    float4 r;                                                   \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
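+// For example, FN_FUNC_FN(acos) below expands to float2/float3/float4 acos()
+// overloads that apply the scalar acos to each component.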
+
+#define IN_FUNC_FN(fnc)                                         \
+extern int2 __attribute__((overloadable)) fnc(float2 v) {   \
+    int2 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    return r;                                                   \
+}                                                               \
+extern int3 __attribute__((overloadable)) fnc(float3 v) {   \
+    int3 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    return r;                                                   \
+}                                                               \
+extern int4 __attribute__((overloadable)) fnc(float4 v) {   \
+    int4 r;                                                     \
+    r.x = fnc(v.x);                                             \
+    r.y = fnc(v.y);                                             \
+    r.z = fnc(v.z);                                             \
+    r.w = fnc(v.w);                                             \
+    return r;                                                   \
+}
+
+#define FN_FUNC_FN_FN(fnc)                                                  \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, float2 v2) { \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, float3 v2) { \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, float4 v2) { \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    r.w = fnc(v1.w, v2.w);                                                  \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_F(fnc)                                                   \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, float v2) {  \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, float v2) {  \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, float v2) {  \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    r.w = fnc(v1.w, v2);                                                    \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_IN(fnc)                                                  \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int2 v2) {   \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int3 v2) {   \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int4 v2) {   \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2.x);                                                  \
+    r.y = fnc(v1.y, v2.y);                                                  \
+    r.z = fnc(v1.z, v2.z);                                                  \
+    r.w = fnc(v1.w, v2.w);                                                  \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_I(fnc)                                                   \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int v2) {    \
+    float2 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int v2) {    \
+    float3 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int v2) {    \
+    float4 r;                                                               \
+    r.x = fnc(v1.x, v2);                                                    \
+    r.y = fnc(v1.y, v2);                                                    \
+    r.z = fnc(v1.z, v2);                                                    \
+    r.w = fnc(v1.w, v2);                                                    \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_PFN(fnc)                     \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 *v2) {            \
+    float2 r;                                   \
+    float t[2];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 *v2) {            \
+    float3 r;                                   \
+    float t[3];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    r.z = fnc(v1.z, &t[2]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    v2->z = t[2];                               \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 *v2) {            \
+    float4 r;                                   \
+    float t[4];                                 \
+    r.x = fnc(v1.x, &t[0]);                     \
+    r.y = fnc(v1.y, &t[1]);                     \
+    r.z = fnc(v1.z, &t[2]);                     \
+    r.w = fnc(v1.w, &t[3]);                     \
+    v2->x = t[0];                               \
+    v2->y = t[1];                               \
+    v2->z = t[2];                               \
+    v2->w = t[3];                               \
+    return r;                                   \
+}
+
+#define FN_FUNC_FN_PIN(fnc)                                                 \
+extern float2 __attribute__((overloadable)) fnc(float2 v1, int2 *v2) {  \
+    float2 r;                                                               \
+    int t[2];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    return r;                                                               \
+}                                                                           \
+extern float3 __attribute__((overloadable)) fnc(float3 v1, int3 *v2) {  \
+    float3 r;                                                               \
+    int t[3];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    r.z = fnc(v1.z, &t[2]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    v2->z = t[2];                                                           \
+    return r;                                                               \
+}                                                                           \
+extern float4 __attribute__((overloadable)) fnc(float4 v1, int4 *v2) {  \
+    float4 r;                                                               \
+    int t[4];                                                               \
+    r.x = fnc(v1.x, &t[0]);                                                 \
+    r.y = fnc(v1.y, &t[1]);                                                 \
+    r.z = fnc(v1.z, &t[2]);                                                 \
+    r.w = fnc(v1.w, &t[3]);                                                 \
+    v2->x = t[0];                                                           \
+    v2->y = t[1];                                                           \
+    v2->z = t[2];                                                           \
+    v2->w = t[3];                                                           \
+    return r;                                                               \
+}
+
+#define FN_FUNC_FN_FN_FN(fnc)                   \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 v2, float2 v3) {  \
+    float2 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 v2, float3 v3) {  \
+    float3 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    r.z = fnc(v1.z, v2.z, v3.z);                \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 v2, float4 v3) {  \
+    float4 r;                                   \
+    r.x = fnc(v1.x, v2.x, v3.x);                \
+    r.y = fnc(v1.y, v2.y, v3.y);                \
+    r.z = fnc(v1.z, v2.z, v3.z);                \
+    r.w = fnc(v1.w, v2.w, v3.w);                \
+    return r;                                   \
+}
+
+#define FN_FUNC_FN_FN_PIN(fnc)                  \
+extern float2 __attribute__((overloadable)) \
+        fnc(float2 v1, float2 v2, int2 *v3) {   \
+    float2 r;                                   \
+    int t[2];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    return r;                                   \
+}                                               \
+extern float3 __attribute__((overloadable)) \
+        fnc(float3 v1, float3 v2, int3 *v3) {   \
+    float3 r;                                   \
+    int t[3];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    r.z = fnc(v1.z, v2.z, &t[2]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    v3->z = t[2];                               \
+    return r;                                   \
+}                                               \
+extern float4 __attribute__((overloadable)) \
+        fnc(float4 v1, float4 v2, int4 *v3) {   \
+    float4 r;                                   \
+    int t[4];                                   \
+    r.x = fnc(v1.x, v2.x, &t[0]);               \
+    r.y = fnc(v1.y, v2.y, &t[1]);               \
+    r.z = fnc(v1.z, v2.z, &t[2]);               \
+    r.w = fnc(v1.w, v2.w, &t[3]);               \
+    v3->x = t[0];                               \
+    v3->y = t[1];                               \
+    v3->z = t[2];                               \
+    v3->w = t[3];                               \
+    return r;                                   \
+}
+
+static const int iposinf = 0x7f800000;
+static const int ineginf = 0xff800000;
+
+static const float posinf() {
+    float f = *((float*)&iposinf);
+    return f;
+}
+
+static const float neginf() {
+    float f = *((float*)&ineginf);
+    return f;
+}
+
+static bool isinf(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == iposinf) || (i == ineginf);
+}
+
+static bool isnan(float f) {
+    int i = *((int*)(void*)&f);
+    return (((i & 0x7f800000) == 0x7f800000) && (i & 0x007fffff));
+}
+
+static bool isposzero(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == 0x00000000);
+}
+
+static bool isnegzero(float f) {
+    int i = *((int*)(void*)&f);
+    return (i == 0x80000000);
+}
+
+static bool iszero(float f) {
+    return isposzero(f) || isnegzero(f);
+}
+
+
+extern float __attribute__((overloadable)) acos(float);
+FN_FUNC_FN(acos)
+
+extern float __attribute__((overloadable)) acosh(float);
+FN_FUNC_FN(acosh)
+
+
+extern float __attribute__((overloadable)) acospi(float v) {
+    return acos(v) / M_PI;
+}
+FN_FUNC_FN(acospi)
+
+extern float __attribute__((overloadable)) asin(float);
+FN_FUNC_FN(asin)
+
+extern float __attribute__((overloadable)) asinh(float);
+FN_FUNC_FN(asinh)
+
+extern float __attribute__((overloadable)) asinpi(float v) {
+    return asin(v) / M_PI;
+}
+FN_FUNC_FN(asinpi)
+
+extern float __attribute__((overloadable)) atan(float);
+FN_FUNC_FN(atan)
+
+extern float __attribute__((overloadable)) atan2(float, float);
+FN_FUNC_FN_FN(atan2)
+
+extern float __attribute__((overloadable)) atanh(float);
+FN_FUNC_FN(atanh)
+
+extern float __attribute__((overloadable)) atanpi(float v) {
+    return atan(v) / M_PI;
+}
+FN_FUNC_FN(atanpi)
+
+
+extern float __attribute__((overloadable)) atan2pi(float y, float x) {
+    return atan2(y, x) / M_PI;
+}
+FN_FUNC_FN_FN(atan2pi)
+
+extern float __attribute__((overloadable)) cbrt(float);
+FN_FUNC_FN(cbrt)
+
+extern float __attribute__((overloadable)) ceil(float);
+FN_FUNC_FN(ceil)
+
+extern float __attribute__((overloadable)) copysign(float, float);
+FN_FUNC_FN_FN(copysign)
+
+extern float __attribute__((overloadable)) cos(float);
+FN_FUNC_FN(cos)
+
+extern float __attribute__((overloadable)) cosh(float);
+FN_FUNC_FN(cosh)
+
+extern float __attribute__((overloadable)) cospi(float v) {
+    return cos(v * M_PI);
+}
+FN_FUNC_FN(cospi)
+
+extern float __attribute__((overloadable)) erfc(float);
+FN_FUNC_FN(erfc)
+
+extern float __attribute__((overloadable)) erf(float);
+FN_FUNC_FN(erf)
+
+extern float __attribute__((overloadable)) exp(float);
+FN_FUNC_FN(exp)
+
+extern float __attribute__((overloadable)) exp2(float);
+FN_FUNC_FN(exp2)
+
+extern float __attribute__((overloadable)) pow(float, float);
+
+extern float __attribute__((overloadable)) exp10(float v) {
+    return pow(10.f, v);
+}
+FN_FUNC_FN(exp10)
+
+extern float __attribute__((overloadable)) expm1(float);
+FN_FUNC_FN(expm1)
+
+extern float __attribute__((overloadable)) fabs(float);
+FN_FUNC_FN(fabs)
+
+extern float __attribute__((overloadable)) fdim(float, float);
+FN_FUNC_FN_FN(fdim)
+
+extern float __attribute__((overloadable)) floor(float);
+FN_FUNC_FN(floor)
+
+extern float __attribute__((overloadable)) fma(float, float, float);
+FN_FUNC_FN_FN_FN(fma)
+
+extern float __attribute__((overloadable)) fmin(float, float);
+
+extern float __attribute__((overloadable)) fmod(float, float);
+FN_FUNC_FN_FN(fmod)
+
+extern float __attribute__((overloadable)) fract(float v, float *iptr) {
+    int i = (int)floor(v);
+    iptr[0] = i;
+    return fmin(v - i, 0x1.fffffep-1f);
+}
+FN_FUNC_FN_PFN(fract)
+
+extern float __attribute__((overloadable)) frexp(float, int *);
+FN_FUNC_FN_PIN(frexp)
+
+extern float __attribute__((overloadable)) hypot(float, float);
+FN_FUNC_FN_FN(hypot)
+
+extern int __attribute__((overloadable)) ilogb(float);
+IN_FUNC_FN(ilogb)
+
+extern float __attribute__((overloadable)) ldexp(float, int);
+FN_FUNC_FN_IN(ldexp)
+FN_FUNC_FN_I(ldexp)
+
+extern float __attribute__((overloadable)) lgamma(float);
+FN_FUNC_FN(lgamma)
+extern float __attribute__((overloadable)) lgamma(float, int*);
+FN_FUNC_FN_PIN(lgamma)
+
+extern float __attribute__((overloadable)) log(float);
+FN_FUNC_FN(log)
+
+extern float __attribute__((overloadable)) log10(float);
+FN_FUNC_FN(log10)
+
+
+extern float __attribute__((overloadable)) log2(float v) {
+    return log10(v) / log10(2.f);
+}
+FN_FUNC_FN(log2)
+
+extern float __attribute__((overloadable)) log1p(float);
+FN_FUNC_FN(log1p)
+
+extern float __attribute__((overloadable)) logb(float);
+FN_FUNC_FN(logb)
+
+extern float __attribute__((overloadable)) mad(float a, float b, float c) {
+    return a * b + c;
+}
+extern float2 __attribute__((overloadable)) mad(float2 a, float2 b, float2 c) {
+    return a * b + c;
+}
+extern float3 __attribute__((overloadable)) mad(float3 a, float3 b, float3 c) {
+    return a * b + c;
+}
+extern float4 __attribute__((overloadable)) mad(float4 a, float4 b, float4 c) {
+    return a * b + c;
+}
+
+extern float __attribute__((overloadable)) modf(float, float *);
+FN_FUNC_FN_PFN(modf);
+
+extern float __attribute__((overloadable)) nan(uint v) {
+    float f[1];
+    uint32_t *ip = (uint32_t *)f;
+    *ip = v | 0x7fc00000;
+    return f[0];
+}
+
+extern float __attribute__((overloadable)) nextafter(float, float);
+FN_FUNC_FN_FN(nextafter)
+
+FN_FUNC_FN_FN(pow)
+
+extern float __attribute__((overloadable)) pown(float v, int p) {
+    return pow(v, (float)p);
+}
+extern float2 __attribute__((overloadable)) pown(float2 v, int2 p) {
+    return pow(v, (float2)p);
+}
+extern float3 __attribute__((overloadable)) pown(float3 v, int3 p) {
+    return pow(v, (float3)p);
+}
+extern float4 __attribute__((overloadable)) pown(float4 v, int4 p) {
+    return pow(v, (float4)p);
+}
+
+extern float __attribute__((overloadable)) powr(float v, float p) {
+    return pow(v, p);
+}
+extern float2 __attribute__((overloadable)) powr(float2 v, float2 p) {
+    return pow(v, p);
+}
+extern float3 __attribute__((overloadable)) powr(float3 v, float3 p) {
+    return pow(v, p);
+}
+extern float4 __attribute__((overloadable)) powr(float4 v, float4 p) {
+    return pow(v, p);
+}
+
+extern float __attribute__((overloadable)) remainder(float, float);
+FN_FUNC_FN_FN(remainder)
+
+extern float __attribute__((overloadable)) remquo(float, float, int *);
+FN_FUNC_FN_FN_PIN(remquo)
+
+extern float __attribute__((overloadable)) rint(float);
+FN_FUNC_FN(rint)
+
+extern float __attribute__((overloadable)) rootn(float v, int r) {
+    if (r == 0) {
+        return nan(0);
+    }
+
+    if (iszero(v)) {
+        if (r < 0) {
+            if (r & 1) {
+                return copysign(posinf(), v);
+            } else {
+                return posinf();
+            }
+        } else {
+            if (r & 1) {
+                return copysign(0.f, v);
+            } else {
+                return 0.f;
+            }
+        }
+    }
+
+    if (!isinf(v) && !isnan(v) && (v < 0.f)) {
+        if (r & 1) {
+            return (-1.f * pow(-1.f * v, 1.f / r));
+        } else {
+            return nan(0);
+        }
+    }
+
+    return pow(v, 1.f / r);
+}
+FN_FUNC_FN_IN(rootn);
+
+extern float __attribute__((overloadable)) round(float);
+FN_FUNC_FN(round)
+
+
+extern float __attribute__((overloadable)) sqrt(float);
+extern float __attribute__((overloadable)) rsqrt(float v) {
+    return 1.f / sqrt(v);
+}
+FN_FUNC_FN(rsqrt)
+
+extern float __attribute__((overloadable)) sin(float);
+FN_FUNC_FN(sin)
+
+extern float __attribute__((overloadable)) sincos(float v, float *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float2 __attribute__((overloadable)) sincos(float2 v, float2 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float3 __attribute__((overloadable)) sincos(float3 v, float3 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+extern float4 __attribute__((overloadable)) sincos(float4 v, float4 *cosptr) {
+    *cosptr = cos(v);
+    return sin(v);
+}
+
+extern float __attribute__((overloadable)) sinh(float);
+FN_FUNC_FN(sinh)
+
+extern float __attribute__((overloadable)) sinpi(float v) {
+    return sin(v * M_PI);
+}
+FN_FUNC_FN(sinpi)
+
+FN_FUNC_FN(sqrt)
+
+extern float __attribute__((overloadable)) tan(float);
+FN_FUNC_FN(tan)
+
+extern float __attribute__((overloadable)) tanh(float);
+FN_FUNC_FN(tanh)
+
+extern float __attribute__((overloadable)) tanpi(float v) {
+    return tan(v * M_PI);
+}
+FN_FUNC_FN(tanpi)
+
+
+extern float __attribute__((overloadable)) tgamma(float);
+FN_FUNC_FN(tgamma)
+
+extern float __attribute__((overloadable)) trunc(float);
+FN_FUNC_FN(trunc)
+
+// Int ops (partial), 6.11.3
+
+#define XN_FUNC_YN(typeout, fnc, typein)                                \
+extern typeout __attribute__((overloadable)) fnc(typein);               \
+extern typeout##2 __attribute__((overloadable)) fnc(typein##2 v) {  \
+    typeout##2 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    return r;                                                           \
+}                                                                       \
+extern typeout##3 __attribute__((overloadable)) fnc(typein##3 v) {  \
+    typeout##3 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    r.z = fnc(v.z);                                                     \
+    return r;                                                           \
+}                                                                       \
+extern typeout##4 __attribute__((overloadable)) fnc(typein##4 v) {  \
+    typeout##4 r;                                                       \
+    r.x = fnc(v.x);                                                     \
+    r.y = fnc(v.y);                                                     \
+    r.z = fnc(v.z);                                                     \
+    r.w = fnc(v.w);                                                     \
+    return r;                                                           \
+}
+
+
+#define UIN_FUNC_IN(fnc)          \
+XN_FUNC_YN(uchar, fnc, char)      \
+XN_FUNC_YN(ushort, fnc, short)    \
+XN_FUNC_YN(uint, fnc, int)
+
+#define IN_FUNC_IN(fnc)           \
+XN_FUNC_YN(uchar, fnc, uchar)     \
+XN_FUNC_YN(char, fnc, char)       \
+XN_FUNC_YN(ushort, fnc, ushort)   \
+XN_FUNC_YN(short, fnc, short)     \
+XN_FUNC_YN(uint, fnc, uint)       \
+XN_FUNC_YN(int, fnc, int)
+
+
+#define XN_FUNC_XN_XN_BODY(type, fnc, body)         \
+extern type __attribute__((overloadable))       \
+        fnc(type v1, type v2) {                     \
+    return body;                                    \
+}                                                   \
+extern type##2 __attribute__((overloadable))    \
+        fnc(type##2 v1, type##2 v2) {               \
+    type##2 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    return r;                                       \
+}                                                   \
+extern type##3 __attribute__((overloadable))    \
+        fnc(type##3 v1, type##3 v2) {               \
+    type##3 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    r.z = fnc(v1.z, v2.z);                          \
+    return r;                                       \
+}                                                   \
+extern type##4 __attribute__((overloadable))    \
+        fnc(type##4 v1, type##4 v2) {               \
+    type##4 r;                                      \
+    r.x = fnc(v1.x, v2.x);                          \
+    r.y = fnc(v1.y, v2.y);                          \
+    r.z = fnc(v1.z, v2.z);                          \
+    r.w = fnc(v1.w, v2.w);                          \
+    return r;                                       \
+}
+
+#define IN_FUNC_IN_IN_BODY(fnc, body) \
+XN_FUNC_XN_XN_BODY(uchar, fnc, body)  \
+XN_FUNC_XN_XN_BODY(char, fnc, body)   \
+XN_FUNC_XN_XN_BODY(ushort, fnc, body) \
+XN_FUNC_XN_XN_BODY(short, fnc, body)  \
+XN_FUNC_XN_XN_BODY(uint, fnc, body)   \
+XN_FUNC_XN_XN_BODY(int, fnc, body)    \
+XN_FUNC_XN_XN_BODY(float, fnc, body)
+
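+// UIN_FUNC_IN(abs) yields uchar/ushort/uint abs() of the signed char/short/int
+// scalars and vectors; IN_FUNC_IN(clz) yields clz() for every 8/16/32-bit
+// integer type, scalar and vector.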
+UIN_FUNC_IN(abs)
+IN_FUNC_IN(clz)
+
+
+// 6.11.4
+
+
+extern float __attribute__((overloadable)) degrees(float radians) {
+    return radians * (180.f / M_PI);
+}
+extern float2 __attribute__((overloadable)) degrees(float2 radians) {
+    return radians * (180.f / M_PI);
+}
+extern float3 __attribute__((overloadable)) degrees(float3 radians) {
+    return radians * (180.f / M_PI);
+}
+extern float4 __attribute__((overloadable)) degrees(float4 radians) {
+    return radians * (180.f / M_PI);
+}
+
+extern float __attribute__((overloadable)) mix(float start, float stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float2 amount) {
+    return start + (stop - start) * amount;
+}
+extern float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float3 amount) {
+    return start + (stop - start) * amount;
+}
+extern float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float4 amount) {
+    return start + (stop - start) * amount;
+}
+extern float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+extern float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float amount) {
+    return start + (stop - start) * amount;
+}
+
+extern float __attribute__((overloadable)) radians(float degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float2 __attribute__((overloadable)) radians(float2 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float3 __attribute__((overloadable)) radians(float3 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+extern float4 __attribute__((overloadable)) radians(float4 degrees) {
+    return degrees * (M_PI / 180.f);
+}
+
+extern float __attribute__((overloadable)) step(float edge, float v) {
+    return (v < edge) ? 0.f : 1.f;
+}
+extern float2 __attribute__((overloadable)) step(float2 edge, float2 v) {
+    float2 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    return r;
+}
+extern float3 __attribute__((overloadable)) step(float3 edge, float3 v) {
+    float3 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    r.z = (v.z < edge.z) ? 0.f : 1.f;
+    return r;
+}
+extern float4 __attribute__((overloadable)) step(float4 edge, float4 v) {
+    float4 r;
+    r.x = (v.x < edge.x) ? 0.f : 1.f;
+    r.y = (v.y < edge.y) ? 0.f : 1.f;
+    r.z = (v.z < edge.z) ? 0.f : 1.f;
+    r.w = (v.w < edge.w) ? 0.f : 1.f;
+    return r;
+}
+extern float2 __attribute__((overloadable)) step(float2 edge, float v) {
+    float2 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    return r;
+}
+extern float3 __attribute__((overloadable)) step(float3 edge, float v) {
+    float3 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    r.z = (v < edge.z) ? 0.f : 1.f;
+    return r;
+}
+extern float4 __attribute__((overloadable)) step(float4 edge, float v) {
+    float4 r;
+    r.x = (v < edge.x) ? 0.f : 1.f;
+    r.y = (v < edge.y) ? 0.f : 1.f;
+    r.z = (v < edge.z) ? 0.f : 1.f;
+    r.w = (v < edge.w) ? 0.f : 1.f;
+    return r;
+}
+
+extern float __attribute__((overloadable)) smoothstep(float, float, float);
+extern float2 __attribute__((overloadable)) smoothstep(float2, float2, float2);
+extern float3 __attribute__((overloadable)) smoothstep(float3, float3, float3);
+extern float4 __attribute__((overloadable)) smoothstep(float4, float4, float4);
+extern float2 __attribute__((overloadable)) smoothstep(float, float, float2);
+extern float3 __attribute__((overloadable)) smoothstep(float, float, float3);
+extern float4 __attribute__((overloadable)) smoothstep(float, float, float4);
+
+extern float __attribute__((overloadable)) sign(float v) {
+    if (v > 0) return 1.f;
+    if (v < 0) return -1.f;
+    return v;
+}
+FN_FUNC_FN(sign)
+
+
+// 6.11.5
+extern float3 __attribute__((overloadable)) cross(float3 lhs, float3 rhs) {
+    float3 r;
+    r.x = lhs.y * rhs.z  - lhs.z * rhs.y;
+    r.y = lhs.z * rhs.x  - lhs.x * rhs.z;
+    r.z = lhs.x * rhs.y  - lhs.y * rhs.x;
+    return r;
+}
+
+extern float4 __attribute__((overloadable)) cross(float4 lhs, float4 rhs) {
+    float4 r;
+    r.x = lhs.y * rhs.z  - lhs.z * rhs.y;
+    r.y = lhs.z * rhs.x  - lhs.x * rhs.z;
+    r.z = lhs.x * rhs.y  - lhs.y * rhs.x;
+    r.w = 0.f;
+    return r;
+}
+
+extern float __attribute__((overloadable)) dot(float lhs, float rhs) {
+    return lhs * rhs;
+}
+extern float __attribute__((overloadable)) dot(float2 lhs, float2 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y;
+}
+extern float __attribute__((overloadable)) dot(float3 lhs, float3 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y + lhs.z*rhs.z;
+}
+extern float __attribute__((overloadable)) dot(float4 lhs, float4 rhs) {
+    return lhs.x*rhs.x + lhs.y*rhs.y + lhs.z*rhs.z + lhs.w*rhs.w;
+}
+
+extern float __attribute__((overloadable)) length(float v) {
+    return v;
+}
+extern float __attribute__((overloadable)) length(float2 v) {
+    return sqrt(v.x*v.x + v.y*v.y);
+}
+extern float __attribute__((overloadable)) length(float3 v) {
+    return sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+}
+extern float __attribute__((overloadable)) length(float4 v) {
+    return sqrt(v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w);
+}
+
+extern float __attribute__((overloadable)) distance(float lhs, float rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float2 lhs, float2 rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float3 lhs, float3 rhs) {
+    return length(lhs - rhs);
+}
+extern float __attribute__((overloadable)) distance(float4 lhs, float4 rhs) {
+    return length(lhs - rhs);
+}
+
+extern float __attribute__((overloadable)) normalize(float v) {
+    return 1.f;
+}
+extern float2 __attribute__((overloadable)) normalize(float2 v) {
+    return v / length(v);
+}
+extern float3 __attribute__((overloadable)) normalize(float3 v) {
+    return v / length(v);
+}
+extern float4 __attribute__((overloadable)) normalize(float4 v) {
+    return v / length(v);
+}
+
+#undef FN_FUNC_FN
+#undef IN_FUNC_FN
+#undef FN_FUNC_FN_FN
+#undef FN_FUNC_FN_F
+#undef FN_FUNC_FN_IN
+#undef FN_FUNC_FN_I
+#undef FN_FUNC_FN_PFN
+#undef FN_FUNC_FN_PIN
+#undef FN_FUNC_FN_FN_FN
+#undef FN_FUNC_FN_FN_PIN
+#undef XN_FUNC_YN
+#undef UIN_FUNC_IN
+#undef IN_FUNC_IN
+#undef XN_FUNC_XN_XN_BODY
+#undef IN_FUNC_IN_IN_BODY
diff --git a/lib/Renderscript/runtime/rs_core.c b/lib/Renderscript/runtime/rs_core.c
new file mode 100644
index 0000000..aaf1336
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_core.c
@@ -0,0 +1,192 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/* Function declarations from libRS */
+extern float4 __attribute__((overloadable)) convert_float4(uchar4 c);
+
+/* Implementation of Core Runtime */
+
+/*
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b)
+{
+    uchar4 c;
+    c.x = (uchar)(r * 255.f + 0.5f);
+    c.y = (uchar)(g * 255.f + 0.5f);
+    c.z = (uchar)(b * 255.f + 0.5f);
+    c.w = 255;
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float r, float g, float b, float a)
+{
+    uchar4 c;
+    c.x = (uchar)(r * 255.f + 0.5f);
+    c.y = (uchar)(g * 255.f + 0.5f);
+    c.z = (uchar)(b * 255.f + 0.5f);
+    c.w = (uchar)(a * 255.f + 0.5f);
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float3 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    uchar4 c = {color.x, color.y, color.z, 255};
+    return c;
+}
+
+extern uchar4 __attribute__((overloadable)) rsPackColorTo8888(float4 color)
+{
+    color *= 255.f;
+    color += 0.5f;
+    uchar4 c = {color.x, color.y, color.z, color.w};
+    return c;
+}
+*/
+
+extern float4 rsUnpackColor8888(uchar4 c)
+{
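+    // 0.003921569f ~= 1/255: scales each 8-bit channel value into the [0, 1] range.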
+    float4 ret = (float4)0.003921569f;
+    ret *= convert_float4(c);
+    return ret;
+}
+
+/////////////////////////////////////////////////////
+// Matrix ops
+/////////////////////////////////////////////////////
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix4x4 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 4 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix4x4 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 4 + col];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix3x3 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 3 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix3x3 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 3 + col];
+}
+
+extern void __attribute__((overloadable))
+rsMatrixSet(rs_matrix2x2 *m, uint32_t row, uint32_t col, float v) {
+    m->m[row * 2 + col] = v;
+}
+
+extern float __attribute__((overloadable))
+rsMatrixGet(const rs_matrix2x2 *m, uint32_t row, uint32_t col) {
+    return m->m[row * 2 + col];
+}
+
+/*
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float4 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + (m->m[8] * in.z) + (m->m[12] * in.w);
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + (m->m[9] * in.z) + (m->m[13] * in.w);
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + (m->m[10] * in.z) + (m->m[14] * in.w);
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + (m->m[11] * in.z) + (m->m[15] * in.w);
+    return ret;
+}
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float4 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float3 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + (m->m[8] * in.z) + m->m[12];
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + (m->m[9] * in.z) + m->m[13];
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + (m->m[10] * in.z) + m->m[14];
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + (m->m[11] * in.z) + m->m[15];
+    return ret;
+}
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float3 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix4x4 *m, float2 in) {
+    float4 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[4] * in.y) + m->m[12];
+    ret.y = (m->m[1] * in.x) + (m->m[5] * in.y) + m->m[13];
+    ret.z = (m->m[2] * in.x) + (m->m[6] * in.y) + m->m[14];
+    ret.w = (m->m[3] * in.x) + (m->m[7] * in.y) + m->m[15];
+    return ret;
+}
+extern float4 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix4x4 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix4x4 *)m, in);
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix3x3 *m, float3 in) {
+    float3 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[3] * in.y) + (m->m[6] * in.z);
+    ret.y = (m->m[1] * in.x) + (m->m[4] * in.y) + (m->m[7] * in.z);
+    ret.z = (m->m[2] * in.x) + (m->m[5] * in.y) + (m->m[8] * in.z);
+    return ret;
+}
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *m, float3 in) {
+    return rsMatrixMultiply((const rs_matrix3x3 *)m, in);
+}
+
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix3x3 *m, float2 in) {
+    float3 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[3] * in.y);
+    ret.y = (m->m[1] * in.x) + (m->m[4] * in.y);
+    ret.z = (m->m[2] * in.x) + (m->m[5] * in.y);
+    return ret;
+}
+extern float3 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix3x3 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix3x3 *)m, in);
+}
+*/
+
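+// The 2x2 multiply below treats the matrix as column-major: (m[0], m[1]) is the
+// first column and (m[2], m[3]) the second, so it computes ret = M * in.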
+extern float2 __attribute__((overloadable))
+rsMatrixMultiply(const rs_matrix2x2 *m, float2 in) {
+    float2 ret;
+    ret.x = (m->m[0] * in.x) + (m->m[2] * in.y);
+    ret.y = (m->m[1] * in.x) + (m->m[3] * in.y);
+    return ret;
+}
+extern float2 __attribute__((overloadable))
+rsMatrixMultiply(rs_matrix2x2 *m, float2 in) {
+    return rsMatrixMultiply((const rs_matrix2x2 *)m, in);
+}
+
+/////////////////////////////////////////////////////
+// int ops
+/////////////////////////////////////////////////////
+
+extern uint __attribute__((overloadable, always_inline)) rsClamp(uint amount, uint low, uint high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
+extern int __attribute__((overloadable, always_inline)) rsClamp(int amount, int low, int high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
+extern ushort __attribute__((overloadable, always_inline)) rsClamp(ushort amount, ushort low, ushort high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
+extern short __attribute__((overloadable, always_inline)) rsClamp(short amount, short low, short high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
+extern uchar __attribute__((overloadable, always_inline)) rsClamp(uchar amount, uchar low, uchar high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
+extern char __attribute__((overloadable, always_inline)) rsClamp(char amount, char low, char high) {
+    return amount < low ? low : (amount > high ? high : amount);
+}
diff --git a/lib/Renderscript/runtime/rs_element.c b/lib/Renderscript/runtime/rs_element.c
new file mode 100644
index 0000000..4db5883
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_element.c
@@ -0,0 +1,111 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Element
+*/
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementCount(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.fieldsCount;
+}
+
+extern rs_element __attribute__((overloadable))
+        rsElementGetSubElement(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        rs_element nullElem = {0};
+        return nullElem;
+    }
+    rs_element returnElem = {element->mHal.state.fields[index]};
+    return returnElem;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementNameLength(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldNameLengths[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementName(rs_element e, uint32_t index, char *name, uint32_t nameLength) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount ||
+        nameLength == 0 || name == NULL) {
+        return 0;
+    }
+
+    uint32_t numToCopy = element->mHal.state.fieldNameLengths[index];
+    if (nameLength < numToCopy) {
+        numToCopy = nameLength;
+    }
+    // Place the null terminator manually, in case of partial string
+    numToCopy--;
+    name[numToCopy] = '\0';
+    const char *nameSource = element->mHal.state.fieldNames[index];
+    for (uint32_t i = 0; i < numToCopy; i++) {
+        name[i] = nameSource[i];
+    }
+    return numToCopy;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementArraySize(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldArraySizes[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetSubElementOffsetBytes(rs_element e, uint32_t index) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL || index >= element->mHal.state.fieldsCount) {
+        return 0;
+    }
+    return element->mHal.state.fieldOffsetBytes[index];
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetBytesSize(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.elementSizeBytes;
+}
+
+extern rs_data_type __attribute__((overloadable))
+        rsElementGetDataType(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return RS_TYPE_INVALID;
+    }
+    return element->mHal.state.dataType;
+}
+
+extern rs_data_kind __attribute__((overloadable))
+        rsElementGetDataKind(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return RS_KIND_INVALID;
+    }
+    return element->mHal.state.dataKind;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsElementGetVectorSize(rs_element e) {
+    Element_t *element = (Element_t *)e.p;
+    if (element == NULL) {
+        return 0;
+    }
+    return element->mHal.state.vectorSize;
+}
diff --git a/lib/Renderscript/runtime/rs_mesh.c b/lib/Renderscript/runtime/rs_mesh.c
new file mode 100644
index 0000000..bb533bc
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_mesh.c
@@ -0,0 +1,55 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Mesh
+*/
+extern uint32_t __attribute__((overloadable))
+        rsgMeshGetVertexAllocationCount(rs_mesh m) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL) {
+        return 0;
+    }
+    return mesh->mHal.state.vertexBuffersCount;
+}
+
+extern uint32_t __attribute__((overloadable))
+        rsgMeshGetPrimitiveCount(rs_mesh m) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL) {
+        return 0;
+    }
+    return mesh->mHal.state.primitivesCount;
+}
+
+extern rs_allocation __attribute__((overloadable))
+        rsgMeshGetVertexAllocation(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.vertexBuffersCount) {
+        rs_allocation nullAlloc = {0};
+        return nullAlloc;
+    }
+    rs_allocation returnAlloc = {mesh->mHal.state.vertexBuffers[index]};
+    return returnAlloc;
+}
+
+extern rs_allocation __attribute__((overloadable))
+        rsgMeshGetIndexAllocation(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.primitivesCount) {
+        rs_allocation nullAlloc = {0};
+        return nullAlloc;
+    }
+    rs_allocation returnAlloc = {mesh->mHal.state.indexBuffers[index]};
+    return returnAlloc;
+}
+
+extern rs_primitive __attribute__((overloadable))
+        rsgMeshGetPrimitive(rs_mesh m, uint32_t index) {
+    Mesh_t *mesh = (Mesh_t *)m.p;
+    if (mesh == NULL || index >= mesh->mHal.state.primitivesCount) {
+        return RS_PRIMITIVE_INVALID;
+    }
+    return mesh->mHal.state.primitives[index];
+}
diff --git a/lib/Renderscript/runtime/rs_program.c b/lib/Renderscript/runtime/rs_program.c
new file mode 100644
index 0000000..64c656f
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_program.c
@@ -0,0 +1,108 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Program Store
+*/
+extern rs_depth_func __attribute__((overloadable))
+        rsgProgramStoreGetDepthFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_DEPTH_FUNC_INVALID;
+    }
+    return prog->mHal.state.depthFunc;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsDepthMaskEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.depthWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskRedEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorRWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskGreenEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorGWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskBlueEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorBWriteEnable;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsColorMaskAlphaEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.colorAWriteEnable;
+}
+
+extern rs_blend_src_func __attribute__((overloadable))
+        rsgProgramStoreGetBlendSrcFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_BLEND_SRC_INVALID;
+    }
+    return prog->mHal.state.blendSrc;
+}
+
+extern rs_blend_dst_func __attribute__((overloadable))
+        rsgProgramStoreGetBlendDstFunc(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return RS_BLEND_DST_INVALID;
+    }
+    return prog->mHal.state.blendDst;
+}
+
+extern bool __attribute__((overloadable))
+        rsgProgramStoreIsDitherEnabled(rs_program_store ps) {
+    ProgramStore_t *prog = (ProgramStore_t *)ps.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.ditherEnable;
+}
+
+/**
+* Program Raster
+*/
+extern bool __attribute__((overloadable))
+        rsgProgramRasterIsPointSpriteEnabled(rs_program_raster pr) {
+    ProgramRaster_t *prog = (ProgramRaster_t *)pr.p;
+    if (prog == NULL) {
+        return false;
+    }
+    return prog->mHal.state.pointSprite;
+}
+
+extern rs_cull_mode __attribute__((overloadable))
+        rsgProgramRasterGetCullMode(rs_program_raster pr) {
+    ProgramRaster_t *prog = (ProgramRaster_t *)pr.p;
+    if (prog == NULL) {
+        return RS_CULL_INVALID;
+    }
+    return prog->mHal.state.cull;
+}
diff --git a/lib/Renderscript/runtime/rs_sample.c b/lib/Renderscript/runtime/rs_sample.c
new file mode 100644
index 0000000..b41e7f1
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_sample.c
@@ -0,0 +1,391 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Allocation sampling
+*/
+static const void * __attribute__((overloadable))
+        getElementAt(rs_allocation a, uint32_t x, uint32_t lod) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const Type_t *type = (const Type_t*)alloc->mHal.state.type;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.mallocPtr;
+
+    const uint32_t offset = type->mHal.state.lodOffset[lod];
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+
+    return &p[offset + eSize * x];
+}
+
+static const void * __attribute__((overloadable))
+        getElementAt(rs_allocation a, uint32_t x, uint32_t y, uint32_t lod) {
+    Allocation_t *alloc = (Allocation_t *)a.p;
+    const Type_t *type = (const Type_t*)alloc->mHal.state.type;
+    const uint8_t *p = (const uint8_t *)alloc->mHal.drvState.mallocPtr;
+
+    const uint32_t eSize = alloc->mHal.state.elementSizeBytes;
+    const uint32_t offset = type->mHal.state.lodOffset[lod];
+    uint32_t stride;
+    if(lod == 0) {
+        stride = alloc->mHal.drvState.stride;
+    } else {
+        stride = type->mHal.state.lodDimX[lod] * eSize;
+    }
+
+    return &p[offset + (eSize * x) + (y * stride)];
+}
+
+static const void * __attribute__((overloadable))
+        getElementAt(rs_allocation a, uint2 uv, uint32_t lod) {
+    return getElementAt(a, uv.x, uv.y, lod);
+}
+
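+// wrapI() maps a texel coordinate into [0, size-1]. RS_SAMPLER_WRAP wraps with a
+// positive modulo (e.g. coord -1 with size 8 becomes 7); every other wrap mode
+// simply clamps (e.g. coord 9 with size 8 becomes 7).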
+static uint32_t wrapI(rs_sampler_value wrap, int32_t coord, int32_t size) {
+    if (wrap == RS_SAMPLER_WRAP) {
+        coord = coord % size;
+        if (coord < 0) {
+            coord += size;
+        }
+    }
+    return (uint32_t)max(0, min(coord, size - 1));
+}
+
+// 565 Conversion bits taken from SkBitmap
+#define SK_R16_BITS     5
+#define SK_G16_BITS     6
+#define SK_B16_BITS     5
+
+#define SK_R16_SHIFT    (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT    (SK_B16_BITS)
+#define SK_B16_SHIFT    0
+
+#define SK_R16_MASK     ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK     ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK     ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color)   (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color)   (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color)   (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+static inline unsigned SkR16ToR32(unsigned r) {
+    return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+    return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+    return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c)      SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c)      SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c)      SkB16ToB32(SkGetPackedB16(c))
+
+static float3 getFrom565(uint16_t color) {
+    float3 result;
+    result.x = (float)SkPacked16ToR32(color);
+    result.y = (float)SkPacked16ToG32(color);
+    result.z = (float)SkPacked16ToB32(color);
+    return result;
+}
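+// Worked example of the expansion above: a 5-bit channel r is widened to 8 bits as
+// (r << 3) | (r >> 2), so r = 31 (0b11111) becomes 248 | 7 = 255 and r = 0 stays 0;
+// replicating the top bits maps the 5/6-bit range onto the full 0..255 range.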
+
+#define SAMPLE_1D_FUNC(vecsize, intype, outtype, convert)                                       \
+        static outtype __attribute__((overloadable))                                            \
+                getSample##vecsize(rs_allocation a, float2 weights,                             \
+                                   uint32_t iPixel, uint32_t next, uint32_t lod) {              \
+            intype *p0c = (intype*)getElementAt(a, iPixel, lod);                                \
+            intype *p1c = (intype*)getElementAt(a, next, lod);                                  \
+            outtype p0 = convert(*p0c);                                                         \
+            outtype p1 = convert(*p1c);                                                         \
+            return p0 * weights.x + p1 * weights.y;                                             \
+        }
+#define SAMPLE_2D_FUNC(vecsize, intype, outtype, convert)                                       \
+        static outtype __attribute__((overloadable))                                            \
+                    getSample##vecsize(rs_allocation a, float4 weights,                         \
+                                       uint2 iPixel, uint2 next, uint32_t lod) {                \
+            intype *p0c = (intype*)getElementAt(a, iPixel.x, iPixel.y, lod);                    \
+            intype *p1c = (intype*)getElementAt(a, next.x, iPixel.y, lod);                      \
+            intype *p2c = (intype*)getElementAt(a, iPixel.x, next.y, lod);                      \
+            intype *p3c = (intype*)getElementAt(a, next.x, next.y, lod);                        \
+            outtype p0 = convert(*p0c);                                                         \
+            outtype p1 = convert(*p1c);                                                         \
+            outtype p2 = convert(*p2c);                                                         \
+            outtype p3 = convert(*p3c);                                                         \
+            return p0 * weights.x + p1 * weights.y + p2 * weights.z + p3 * weights.w;           \
+        }
+
+SAMPLE_1D_FUNC(1, uchar, float, (float))
+SAMPLE_1D_FUNC(2, uchar2, float2, convert_float2)
+SAMPLE_1D_FUNC(3, uchar3, float3, convert_float3)
+SAMPLE_1D_FUNC(4, uchar4, float4, convert_float4)
+SAMPLE_1D_FUNC(565, uint16_t, float3, getFrom565)
+
+SAMPLE_2D_FUNC(1, uchar, float, (float))
+SAMPLE_2D_FUNC(2, uchar2, float2, convert_float2)
+SAMPLE_2D_FUNC(3, uchar3, float3, convert_float3)
+SAMPLE_2D_FUNC(4, uchar4, float4, convert_float4)
+SAMPLE_2D_FUNC(565, uint16_t, float3, getFrom565)
+
+// Sampler function body is the same for all dimensions
+#define SAMPLE_FUNC_BODY()                                                                      \
+{                                                                                               \
+    rs_element elem = rsAllocationGetElement(a);                                                \
+    rs_data_kind dk = rsElementGetDataKind(elem);                                               \
+    rs_data_type dt = rsElementGetDataType(elem);                                               \
+                                                                                                \
+    if (dk == RS_KIND_USER || (dt != RS_TYPE_UNSIGNED_8 && dt != RS_TYPE_UNSIGNED_5_6_5)) {     \
+        float4 zero = {0.0f, 0.0f, 0.0f, 0.0f};                                                 \
+        return zero;                                                                            \
+    }                                                                                           \
+                                                                                                \
+    uint32_t vecSize = rsElementGetVectorSize(elem);                                            \
+    Allocation_t *alloc = (Allocation_t *)a.p;                                                  \
+    const Type_t *type = (const Type_t*)alloc->mHal.state.type;                                 \
+                                                                                                \
+    rs_sampler_value sampleMin = rsSamplerGetMinification(s);                                  \
+    rs_sampler_value sampleMag = rsSamplerGetMagnification(s);                                 \
+                                                                                                \
+    if (lod <= 0.0f) {                                                                          \
+        if (sampleMag == RS_SAMPLER_NEAREST) {                                                  \
+            return sample_LOD_NearestPixel(a, type, vecSize, dt, s, uv, 0);                     \
+        }                                                                                       \
+        return sample_LOD_LinearPixel(a, type, vecSize, dt, s, uv, 0);                          \
+    }                                                                                           \
+                                                                                                \
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_NEAREST) {                                           \
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;                                        \
+        lod = min(lod, (float)maxLOD);                                                          \
+        uint32_t nearestLOD = (uint32_t)round(lod);                                             \
+        return sample_LOD_LinearPixel(a, type, vecSize, dt, s, uv, nearestLOD);                 \
+    }                                                                                           \
+                                                                                                \
+    if (sampleMin == RS_SAMPLER_LINEAR_MIP_LINEAR) {                                            \
+        uint32_t lod0 = (uint32_t)floor(lod);                                                   \
+        uint32_t lod1 = (uint32_t)ceil(lod);                                                    \
+        uint32_t maxLOD = type->mHal.state.lodCount - 1;                                        \
+        lod0 = min(lod0, maxLOD);                                                               \
+        lod1 = min(lod1, maxLOD);                                                               \
+        float4 sample0 = sample_LOD_LinearPixel(a, type, vecSize, dt, s, uv, lod0);             \
+        float4 sample1 = sample_LOD_LinearPixel(a, type, vecSize, dt, s, uv, lod1);             \
+        float frac = lod - (float)lod0;                                                         \
+        return sample0 * (1.0f - frac) + sample1 * frac;                                        \
+    }                                                                                           \
+                                                                                                \
+    return sample_LOD_NearestPixel(a, type, vecSize, dt, s, uv, 0);                             \
+} // End of sampler function body (same for all dimensions)
+
+// Body of the bilinear sampling function
+#define BILINEAR_SAMPLE_BODY()                                                                  \
+{                                                                                               \
+    float4 result;                                                                              \
+    if (dt == RS_TYPE_UNSIGNED_5_6_5) {                                                         \
+        result.xyz = getSample565(a, weights, iPixel, next, lod);                               \
+        return result;                                                                          \
+    }                                                                                           \
+                                                                                                \
+    switch(vecSize) {                                                                           \
+    case 1:                                                                                     \
+        result.x = getSample1(a, weights, iPixel, next, lod);                                   \
+        break;                                                                                  \
+    case 2:                                                                                     \
+        result.xy = getSample2(a, weights, iPixel, next, lod);                                  \
+        break;                                                                                  \
+    case 3:                                                                                     \
+        result.xyz = getSample3(a, weights, iPixel, next, lod);                                 \
+        break;                                                                                  \
+    case 4:                                                                                     \
+        result = getSample4(a, weights, iPixel, next, lod);                                     \
+        break;                                                                                  \
+    }                                                                                           \
+                                                                                                \
+    return result * 0.003921569f;                                                                              \
+} // End of body of the bilinear sampling function
+
+// Body of the nearest sampling function
+#define NEAREST_SAMPLE_BODY()                                                                   \
+{                                                                                               \
+    float4 result;                                                                              \
+    if (dt == RS_TYPE_UNSIGNED_5_6_5) {                                                         \
+        result.xyz = getFrom565(*(uint16_t*)getElementAt(a, iPixel, lod));                      \
+        return result;                                                                          \
+    }                                                                                           \
+                                                                                                \
+    switch(vecSize) {                                                                           \
+    case 1:                                                                                     \
+        result.x = (float)(*((uchar*)getElementAt(a, iPixel, lod)));                            \
+        break;                                                                                  \
+    case 2:                                                                                     \
+        result.xy = convert_float2(*((uchar2*)getElementAt(a, iPixel, lod)));                   \
+        break;                                                                                  \
+    case 3:                                                                                     \
+        result.xyz = convert_float3(*((uchar3*)getElementAt(a, iPixel, lod)));                  \
+        break;                                                                                  \
+    case 4:                                                                                     \
+        result = convert_float4(*((uchar4*)getElementAt(a, iPixel, lod)));                      \
+        break;                                                                                  \
+    }                                                                                           \
+                                                                                                \
+    return result * 0.003921569f;                                                                              \
+} // End of body of the nearest sampling function
+
+static float4 __attribute__((overloadable))
+        getBilinearSample(rs_allocation a, float2 weights,
+                          uint32_t iPixel, uint32_t next,
+                          uint32_t vecSize, rs_data_type dt, uint32_t lod) {
+    BILINEAR_SAMPLE_BODY()
+}
+
+static float4 __attribute__((overloadable))
+        getBilinearSample(rs_allocation a, float4 weights,
+                          uint2 iPixel, uint2 next,
+                          uint32_t vecSize, rs_data_type dt, uint32_t lod) {
+    BILINEAR_SAMPLE_BODY()
+}
+
+static float4  __attribute__((overloadable))
+        getNearestSample(rs_allocation a, uint32_t iPixel, uint32_t vecSize,
+                         rs_data_type dt, uint32_t lod) {
+    NEAREST_SAMPLE_BODY()
+}
+
+static float4  __attribute__((overloadable))
+        getNearestSample(rs_allocation a, uint2 iPixel, uint32_t vecSize,
+                         rs_data_type dt, uint32_t lod) {
+    NEAREST_SAMPLE_BODY()
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_LinearPixel(rs_allocation a, const Type_t *type,
+                               uint32_t vecSize, rs_data_type dt,
+                               rs_sampler s,
+                               float uv, uint32_t lod) {
+    rs_sampler_value wrapS = rsSamplerGetWrapS(s);
+    int32_t sourceW = type->mHal.state.lodDimX[lod];
+    float pixelUV = uv * (float)(sourceW);
+    int32_t iPixel = (int32_t)(pixelUV);
+    float frac = pixelUV - (float)iPixel;
+
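+    // Shift by half a texel so sampling happens between texel centers; after this
+    // adjustment, frac is the distance from the center of the lower texel.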
+    if (frac < 0.5f) {
+        iPixel -= 1;
+        frac += 0.5f;
+    } else {
+        frac -= 0.5f;
+    }
+
+    float oneMinusFrac = 1.0f - frac;
+
+    float2 weights;
+    weights.x = oneMinusFrac;
+    weights.y = frac;
+
+    uint32_t next = wrapI(wrapS, iPixel + 1, sourceW);
+    uint32_t location = wrapI(wrapS, iPixel, sourceW);
+
+    return getBilinearSample(a, weights, location, next, vecSize, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_NearestPixel(rs_allocation a, const Type_t *type,
+                                uint32_t vecSize, rs_data_type dt,
+                                rs_sampler s,
+                                float uv, uint32_t lod) {
+    rs_sampler_value wrapS = rsSamplerGetWrapS(s);
+    int32_t sourceW = type->mHal.state.lodDimX[lod];
+    int32_t iPixel = (int32_t)(uv * (float)(sourceW));
+    uint32_t location = wrapI(wrapS, iPixel, sourceW);
+
+    return getNearestSample(a, location, vecSize, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_LinearPixel(rs_allocation a, const Type_t *type,
+                               uint32_t vecSize, rs_data_type dt,
+                               rs_sampler s,
+                               float2 uv, uint32_t lod) {
+    rs_sampler_value wrapS = rsSamplerGetWrapS(s);
+    rs_sampler_value wrapT = rsSamplerGetWrapT(s);
+
+    int32_t sourceW = type->mHal.state.lodDimX[lod];
+    int32_t sourceH = type->mHal.state.lodDimY[lod];
+
+    float2 dimF;
+    dimF.x = (float)(sourceW);
+    dimF.y = (float)(sourceH);
+    float2 pixelUV = uv * dimF;
+    int2 iPixel = convert_int2(pixelUV);
+
+    float2 frac = pixelUV - convert_float2(iPixel);
+
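+    // Same half-texel centering as the 1D case above, applied independently to x and y.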
+    if (frac.x < 0.5f) {
+        iPixel.x -= 1;
+        frac.x += 0.5f;
+    } else {
+        frac.x -= 0.5f;
+    }
+    if (frac.y < 0.5f) {
+        iPixel.y -= 1;
+        frac.y += 0.5f;
+    } else {
+        frac.y -= 0.5f;
+    }
+    float2 oneMinusFrac = 1.0f - frac;
+
+    float4 weights;
+    weights.x = oneMinusFrac.x * oneMinusFrac.y;
+    weights.y = frac.x * oneMinusFrac.y;
+    weights.z = oneMinusFrac.x * frac.y;
+    weights.w = frac.x * frac.y;
+
+    uint2 next;
+    next.x = wrapI(wrapS, iPixel.x + 1, sourceW);
+    next.y = wrapI(wrapT, iPixel.y + 1, sourceH);
+    uint2 location;
+    location.x = wrapI(wrapS, iPixel.x, sourceW);
+    location.y = wrapI(wrapT, iPixel.y, sourceH);
+
+    return getBilinearSample(a, weights, location, next, vecSize, dt, lod);
+}
+
+static float4 __attribute__((overloadable))
+        sample_LOD_NearestPixel(rs_allocation a, const Type_t *type,
+                                uint32_t vecSize, rs_data_type dt,
+                                rs_sampler s,
+                                float2 uv, uint32_t lod) {
+    rs_sampler_value wrapS = rsSamplerGetWrapS(s);
+    rs_sampler_value wrapT = rsSamplerGetWrapT(s);
+
+    int32_t sourceW = type->mHal.state.lodDimX[lod];
+    int32_t sourceH = type->mHal.state.lodDimY[lod];
+
+    float2 dimF;
+    dimF.x = (float)(sourceW);
+    dimF.y = (float)(sourceH);
+    int2 iPixel = convert_int2(uv * dimF);
+
+    uint2 location;
+    location.x = wrapI(wrapS, iPixel.x, sourceW);
+    location.y = wrapI(wrapT, iPixel.y, sourceH);
+    return getNearestSample(a, location, vecSize, dt, lod);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float location) {
+    return rsSample(a, s, location, 0);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float uv, float lod) {
+    SAMPLE_FUNC_BODY()
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float2 location) {
+    return rsSample(a, s, location, 0.0f);
+}
+
+extern const float4 __attribute__((overloadable))
+        rsSample(rs_allocation a, rs_sampler s, float2 uv, float lod) {
+    SAMPLE_FUNC_BODY()
+}
diff --git a/lib/Renderscript/runtime/rs_sampler.c b/lib/Renderscript/runtime/rs_sampler.c
new file mode 100644
index 0000000..39782de
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_sampler.c
@@ -0,0 +1,51 @@
+#include "rs_core.rsh"
+#include "rs_graphics.rsh"
+#include "rs_structs.h"
+
+/**
+* Sampler
+*/
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetMinification(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.minFilter;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetMagnification(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.magFilter;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetWrapS(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.wrapS;
+}
+
+extern rs_sampler_value __attribute__((overloadable))
+        rsSamplerGetWrapT(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return RS_SAMPLER_INVALID;
+    }
+    return prog->mHal.state.wrapT;
+}
+
+extern float __attribute__((overloadable))
+        rsSamplerGetAnisotropy(rs_sampler s) {
+    Sampler_t *prog = (Sampler_t *)s.p;
+    if (prog == NULL) {
+        return 0.0f;
+    }
+    return prog->mHal.state.aniso;
+}
diff --git a/lib/Renderscript/runtime/rs_structs.h b/lib/Renderscript/runtime/rs_structs.h
new file mode 100644
index 0000000..bef849c
--- /dev/null
+++ b/lib/Renderscript/runtime/rs_structs.h
@@ -0,0 +1,254 @@
+#ifndef _RS_CORE_H_
+#define _RS_CORE_H_
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Allocation owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsAllocation.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsAllocationGetDimX(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * allocations.
+ *
+ *****************************************************************************/
+typedef enum {
+    RS_ALLOCATION_MIPMAP_NONE = 0,
+    RS_ALLOCATION_MIPMAP_FULL = 1,
+    RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE = 2
+} rs_allocation_mipmap_control;
+
+typedef struct Allocation {
+    char __pad[28];
+    struct {
+        void * drv;
+        struct {
+            const void *type;
+            uint32_t usageFlags;
+            rs_allocation_mipmap_control mipmapControl;
+            uint32_t dimensionX;
+            uint32_t dimensionY;
+            uint32_t dimensionZ;
+            uint32_t elementSizeBytes;
+            bool hasMipmaps;
+            bool hasFaces;
+            bool hasReferences;
+            void * usrPtr;
+            int32_t surfaceTextureID;
+            void * wndSurface;
+            void * surfaceTexture;
+        } state;
+
+        struct DrvState {
+            void * mallocPtr;
+            uint32_t stride;
+        } drvState;
+    } mHal;
+} Allocation_t;
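+/*
+ * Illustrative sketch only (not part of the runtime): given the layout above, an
+ * accessor such as rsAllocationGetDimX() can be inlined down to a plain field load
+ * rather than a call into the driver. A hypothetical helper using the same cast
+ * pattern as the runtime sources:
+ *
+ *     static uint32_t exampleGetDimX(rs_allocation a) {
+ *         Allocation_t *alloc = (Allocation_t *)a.p;
+ *         return alloc->mHal.state.dimensionX;
+ *     }
+ */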
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class ProgramStore owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsProgramStore.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgProgramStoreGetDepthFunc(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * program store.
+ *
+ *****************************************************************************/
+typedef struct ProgramStore {
+    char __pad[36];
+    struct {
+        struct {
+            bool ditherEnable;
+            bool colorRWriteEnable;
+            bool colorGWriteEnable;
+            bool colorBWriteEnable;
+            bool colorAWriteEnable;
+            rs_blend_src_func blendSrc;
+            rs_blend_dst_func blendDst;
+            bool depthWriteEnable;
+            rs_depth_func depthFunc;
+        } state;
+    } mHal;
+} ProgramStore_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class ProgramRaster owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsProgramRaster.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgProgramRasterGetCullMode(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * program raster.
+ *
+ *****************************************************************************/
+typedef struct ProgramRaster {
+    char __pad[36];
+    struct {
+        struct {
+            bool pointSprite;
+            rs_cull_mode cull;
+        } state;
+    } mHal;
+} ProgramRaster_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Sampler owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsSampler.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsSamplerGetMagnification(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * samplers.
+ *
+ *****************************************************************************/
+typedef struct Sampler {
+    char __pad[32];
+    struct {
+        struct {
+            rs_sampler_value magFilter;
+            rs_sampler_value minFilter;
+            rs_sampler_value wrapS;
+            rs_sampler_value wrapT;
+            rs_sampler_value wrapR;
+            float aniso;
+        } state;
+    } mHal;
+} Sampler_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Element owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsElement.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsElementGetSubElementCount(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * elements.
+ *
+ *****************************************************************************/
+typedef struct Element {
+    char __pad[28];
+    struct {
+        void *drv;
+        struct {
+            rs_data_type dataType;
+            rs_data_kind dataKind;
+            uint32_t vectorSize;
+            uint32_t elementSizeBytes;
+
+            // Subelements
+            const void **fields;
+            uint32_t *fieldArraySizes;
+            const char **fieldNames;
+            uint32_t *fieldNameLengths;
+            uint32_t *fieldOffsetBytes;
+            uint32_t fieldsCount;
+        } state;
+    } mHal;
+} Element_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Type owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsType.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsAllocationGetElement(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * types.
+ *
+ *****************************************************************************/
+typedef struct Type {
+    char __pad[28];
+    struct {
+        void *drv;
+        struct {
+            const void * element;
+            uint32_t dimX;
+            uint32_t dimY;
+            uint32_t dimZ;
+            uint32_t *lodDimX;
+            uint32_t *lodDimY;
+            uint32_t *lodDimZ;
+            uint32_t *lodOffset;
+            uint32_t lodCount;
+            bool faces;
+        } state;
+    } mHal;
+} Type_t;
+
+/*****************************************************************************
+ * CAUTION
+ *
+ * The following structure layout provides a more efficient way to access
+ * internal members of the C++ class Mesh owned by librs. Unfortunately,
+ * since this class has virtual members, we can't simply use offsetof() or any
+ * other compiler trickery to dynamically get the appropriate values at
+ * build-time. This layout may need to be updated whenever
+ * frameworks/base/libs/rs/rsMesh.h is modified.
+ *
+ * Having the layout information available in this file allows us to
+ * accelerate functionality like rsgMeshGetVertexAllocationCount(). Without this
+ * information, we would not be able to inline the bitcode, thus resulting in
+ * potential runtime performance penalties for tight loops operating on
+ * meshes.
+ *
+ *****************************************************************************/
+typedef struct Mesh {
+    char __pad[28];
+    struct {
+        void *drv;
+        struct {
+            void **vertexBuffers;
+            uint32_t vertexBuffersCount;
+
+            // indexBuffers[i] could be NULL, in which case only primitives[i] is used
+            void **indexBuffers;
+            uint32_t indexBuffersCount;
+            rs_primitive *primitives;
+            uint32_t primitivesCount;
+        } state;
+    } mHal;
+} Mesh_t;
+#endif // _RS_CORE_H_