resolved conflicts for merge of 7940e44f to dalvik-dev

Change-Id: I6529b2fc27dfaedd2cb87b3697d049ccabed36ee
diff --git a/compiler/Android.mk b/compiler/Android.mk
new file mode 100644
index 0000000..40a74c1
--- /dev/null
+++ b/compiler/Android.mk
@@ -0,0 +1,242 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.common.mk
+
+LIBART_COMPILER_SRC_FILES := \
+	dex/local_value_numbering.cc \
+	dex/arena_allocator.cc \
+	dex/arena_bit_vector.cc \
+	dex/quick/arm/assemble_arm.cc \
+	dex/quick/arm/call_arm.cc \
+	dex/quick/arm/fp_arm.cc \
+	dex/quick/arm/int_arm.cc \
+	dex/quick/arm/target_arm.cc \
+	dex/quick/arm/utility_arm.cc \
+	dex/quick/codegen_util.cc \
+	dex/quick/gen_common.cc \
+	dex/quick/gen_invoke.cc \
+	dex/quick/gen_loadstore.cc \
+	dex/quick/local_optimizations.cc \
+	dex/quick/mips/assemble_mips.cc \
+	dex/quick/mips/call_mips.cc \
+	dex/quick/mips/fp_mips.cc \
+	dex/quick/mips/int_mips.cc \
+	dex/quick/mips/target_mips.cc \
+	dex/quick/mips/utility_mips.cc \
+	dex/quick/mir_to_lir.cc \
+	dex/quick/ralloc_util.cc \
+	dex/quick/x86/assemble_x86.cc \
+	dex/quick/x86/call_x86.cc \
+	dex/quick/x86/fp_x86.cc \
+	dex/quick/x86/int_x86.cc \
+	dex/quick/x86/target_x86.cc \
+	dex/quick/x86/utility_x86.cc \
+	dex/portable/mir_to_gbc.cc \
+	dex/dex_to_dex_compiler.cc \
+	dex/mir_dataflow.cc \
+	dex/mir_optimization.cc \
+	dex/frontend.cc \
+	dex/mir_graph.cc \
+	dex/vreg_analysis.cc \
+	dex/ssa_transformation.cc \
+	driver/compiler_driver.cc \
+	driver/dex_compilation_unit.cc \
+	jni/portable/jni_compiler.cc \
+	jni/quick/arm/calling_convention_arm.cc \
+	jni/quick/mips/calling_convention_mips.cc \
+	jni/quick/x86/calling_convention_x86.cc \
+	jni/quick/calling_convention.cc \
+	jni/quick/jni_compiler.cc \
+	llvm/compiler_llvm.cc \
+	llvm/gbc_expander.cc \
+	llvm/generated/art_module.cc \
+	llvm/intrinsic_helper.cc \
+	llvm/ir_builder.cc \
+	llvm/llvm_compilation_unit.cc \
+	llvm/md_builder.cc \
+	llvm/runtime_support_builder.cc \
+	llvm/runtime_support_builder_arm.cc \
+	llvm/runtime_support_builder_thumb2.cc \
+	llvm/runtime_support_builder_x86.cc \
+	stubs/portable/stubs.cc \
+	stubs/quick/stubs.cc \
+	elf_fixup.cc \
+	elf_stripper.cc \
+	elf_writer.cc \
+	elf_writer_quick.cc \
+	image_writer.cc \
+	oat_writer.cc
+
+ifeq ($(ART_SEA_IR_MODE),true)
+LIBART_COMPILER_SRC_FILES += \
+	sea_ir/frontend.cc \
+	sea_ir/instruction_tools.cc
+endif
+
+LIBART_COMPILER_CFLAGS :=
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+  LIBART_COMPILER_SRC_FILES += elf_writer_mclinker.cc
+  LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
+endif
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
+	dex/compiler_enums.h
+
+# $(1): target or host
+# $(2): ndebug or debug
+define build-libart-compiler
+  ifneq ($(1),target)
+    ifneq ($(1),host)
+      $$(error expected target or host for argument 1, received $(1))
+    endif
+  endif
+  ifneq ($(2),ndebug)
+    ifneq ($(2),debug)
+      $$(error expected ndebug or debug for argument 2, received $(2))
+    endif
+  endif
+
+  art_target_or_host := $(1)
+  art_ndebug_or_debug := $(2)
+
+  include $(CLEAR_VARS)
+  ifeq ($$(art_target_or_host),target)
+    include external/stlport/libstlport.mk
+  else
+    LOCAL_IS_HOST_MODULE := true
+  endif
+  LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+  ifeq ($$(art_ndebug_or_debug),ndebug)
+    LOCAL_MODULE := libart-compiler
+  else # debug
+    LOCAL_MODULE := libartd-compiler
+  endif
+
+  LOCAL_MODULE_TAGS := optional
+  LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+
+  LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES)
+
+  GENERATED_SRC_DIR := $$(call intermediates-dir-for,$$(LOCAL_MODULE_CLASS),$$(LOCAL_MODULE),$$(LOCAL_IS_HOST_MODULE),)
+  ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,$$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES))
+  ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES))
+
+$$(ENUM_OPERATOR_OUT_GEN): art/tools/generate-operator-out.py
+$$(ENUM_OPERATOR_OUT_GEN): PRIVATE_CUSTOM_TOOL = art/tools/generate-operator-out.py $(LOCAL_PATH) $$< > $$@
+$$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PATH)/%.h
+	$$(transform-generated-source)
+
+  LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
+
+  LOCAL_CFLAGS := $$(LIBART_COMPILER_CFLAGS)
+  ifeq ($$(art_target_or_host),target)
+    LOCAL_CLANG := $(ART_TARGET_CLANG)
+    LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+  else # host
+    LOCAL_CLANG := $(ART_HOST_CLANG)
+    LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+  endif
+
+  # TODO: clean up the compilers and remove this.
+  LOCAL_CFLAGS += -Wno-unused-parameter
+
+  LOCAL_SHARED_LIBRARIES := liblog
+  ifeq ($$(art_ndebug_or_debug),debug)
+    ifeq ($$(art_target_or_host),target)
+      LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS)
+    else # host
+      LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
+    endif
+    LOCAL_SHARED_LIBRARIES += libartd
+  else
+    ifeq ($$(art_target_or_host),target)
+      LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS)
+    else # host
+      LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
+    endif
+    LOCAL_SHARED_LIBRARIES += libart
+  endif
+  LOCAL_SHARED_LIBRARIES += libbcc libLLVM
+
+  ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+    LOCAL_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
+  endif
+
+  LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
+
+  ifeq ($$(art_target_or_host),target)
+    LOCAL_SHARED_LIBRARIES += libstlport
+  else # host
+    LOCAL_LDLIBS := -ldl -lpthread
+  endif
+  LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+  LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+  ifeq ($$(art_target_or_host),target)
+    LOCAL_SHARED_LIBRARIES += libcutils
+    include $(LLVM_GEN_INTRINSICS_MK)
+    include $(LLVM_DEVICE_BUILD_MK)
+    include $(BUILD_SHARED_LIBRARY)
+  else # host
+    LOCAL_STATIC_LIBRARIES += libcutils
+    include $(LLVM_GEN_INTRINSICS_MK)
+    include $(LLVM_HOST_BUILD_MK)
+    include $(BUILD_HOST_SHARED_LIBRARY)
+  endif
+
+  ifeq ($$(art_target_or_host),target)
+    ifeq ($$(art_ndebug_or_debug),debug)
+      $(TARGET_OUT_EXECUTABLES)/dex2oatd: $$(LOCAL_INSTALLED_MODULE)
+    else
+      $(TARGET_OUT_EXECUTABLES)/dex2oat: $$(LOCAL_INSTALLED_MODULE)
+    endif
+  else # host
+    ifeq ($$(art_ndebug_or_debug),debug)
+      $(HOST_OUT_EXECUTABLES)/dex2oatd: $$(LOCAL_INSTALLED_MODULE)
+    else
+      $(HOST_OUT_EXECUTABLES)/dex2oat: $$(LOCAL_INSTALLED_MODULE)
+    endif
+  endif
+
+endef
+
+ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
+  $(eval $(call build-libart-compiler,target,ndebug))
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+  $(eval $(call build-libart-compiler,target,debug))
+endif
+# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross-compile for the target.
+ifeq ($(ART_BUILD_NDEBUG),true)
+  $(eval $(call build-libart-compiler,host,ndebug))
+endif
+ifeq ($(ART_BUILD_DEBUG),true)
+  $(eval $(call build-libart-compiler,host,debug))
+endif
+
+# Rule to build /system/lib/libcompiler_rt.a
+# Usually static libraries are not installed on the device.
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ifeq ($(ART_BUILD_TARGET),true)
+# TODO: Move to external/compiler_rt
+$(eval $(call copy-one-file, $(call intermediates-dir-for,STATIC_LIBRARIES,libcompiler_rt,,)/libcompiler_rt.a, $(TARGET_OUT_SHARED_LIBRARIES)/libcompiler_rt.a))
+
+$(DEX2OAT): $(TARGET_OUT_SHARED_LIBRARIES)/libcompiler_rt.a
+
+endif
+endif
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
new file mode 100644
index 0000000..3a3e385
--- /dev/null
+++ b/compiler/dex/arena_allocator.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dex_file-inl.h"
+#include "arena_allocator.h"
+#include "base/logging.h"
+
+namespace art {
+
+static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
+  "Misc       ",
+  "BasicBlock ",
+  "LIR        ",
+  "MIR        ",
+  "DataFlow   ",
+  "GrowList   ",
+  "GrowBitMap ",
+  "Dalvik2SSA ",
+  "DebugInfo  ",
+  "Successor  ",
+  "RegAlloc   ",
+  "Data       ",
+  "Preds      ",
+};
+
+ArenaAllocator::ArenaAllocator(size_t default_size)
+  : default_size_(default_size),
+    block_size_(default_size - sizeof(ArenaMemBlock)),
+    arena_head_(NULL),
+    current_block_(NULL),
+    num_arena_blocks_(0),
+    malloc_bytes_(0),
+    lost_bytes_(0),
+    num_allocations_(0) {
+  memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
+  // Start with an empty arena.
+  arena_head_ = current_block_ = EmptyArenaBlock();
+  num_arena_blocks_++;
+}
+
+ArenaAllocator::~ArenaAllocator() {
+  // Reclaim all the arena blocks allocated so far.
+  ArenaMemBlock* head = arena_head_;
+  while (head != NULL) {
+    ArenaMemBlock* p = head;
+    head = head->next;
+    free(p);
+  }
+  arena_head_ = NULL;
+  num_arena_blocks_ = 0;
+}
+
+// Return an arena block with no storage for use as a sentinel.
+ArenaAllocator::ArenaMemBlock* ArenaAllocator::EmptyArenaBlock() {
+  ArenaMemBlock* res = static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock)));
+  malloc_bytes_ += sizeof(ArenaMemBlock);
+  res->block_size = 0;
+  res->bytes_allocated = 0;
+  res->next = NULL;
+  return res;
+}
+
+// Arena-based malloc for compilation tasks.
+void* ArenaAllocator::NewMem(size_t size, bool zero, ArenaAllocKind kind) {
+  DCHECK(current_block_ != NULL);
+  DCHECK(arena_head_ != NULL);
+  size = (size + 3) & ~3;  // Round the request up to a multiple of 4 bytes.
+  alloc_stats_[kind] += size;
+  ArenaMemBlock* allocation_block = current_block_;  // Assume we'll fit.
+  size_t remaining_space = current_block_->block_size - current_block_->bytes_allocated;
+  if (remaining_space < size) {
+    /*
+     * Time to allocate a new block.  If this is a large allocation or we have
+     * significant space remaining in the current block then fulfill the allocation
+     * request with a custom-sized malloc() - otherwise grab a new standard block.
+     */
+    size_t allocation_size = sizeof(ArenaMemBlock);
+    if ((remaining_space >= ARENA_HIGH_WATER) || (size > block_size_)) {
+      allocation_size += size;
+    } else {
+      allocation_size += block_size_;
+    }
+    ArenaMemBlock *new_block = static_cast<ArenaMemBlock*>(malloc(allocation_size));
+    if (new_block == NULL) {
+      LOG(FATAL) << "Arena allocation failure";
+    }
+    malloc_bytes_ += allocation_size;
+    new_block->block_size = allocation_size - sizeof(ArenaMemBlock);
+    new_block->bytes_allocated = 0;
+    new_block->next = NULL;
+    num_arena_blocks_++;
+    /*
+     * If the new block is completely full, insert it into the head of the list so we don't
+     * bother trying to fit more and won't hide the potentially allocatable space on the
+     * last (current_block_) block.  TUNING: if we move to a mark scheme, revisit
+     * this code to keep allocation order intact.
+     */
+    if (new_block->block_size == size) {
+      new_block->next = arena_head_;
+      arena_head_ = new_block;
+    } else {
+      int lost = (current_block_->block_size - current_block_->bytes_allocated);
+      lost_bytes_ += lost;
+      current_block_->next = new_block;
+      current_block_ = new_block;
+    }
+    allocation_block = new_block;
+  }
+  void* ptr = &allocation_block->ptr[allocation_block->bytes_allocated];
+  allocation_block->bytes_allocated += size;
+  if (zero) {
+    memset(ptr, 0, size);
+  }
+  num_allocations_++;
+  return ptr;
+}
+
+// Dump memory usage stats.
+void ArenaAllocator::DumpMemStats(std::ostream& os) const {
+  size_t total = 0;
+  for (int i = 0; i < kNumAllocKinds; i++) {
+    total += alloc_stats_[i];
+  }
+  os << " MEM: used: " << total << ", allocated: " << malloc_bytes_
+     << ", lost: " << lost_bytes_ << "\n";
+  os << "Number of blocks allocated: " << num_arena_blocks_ << ", Number of allocations: "
+     << num_allocations_ << ", avg: " << total / num_allocations_ << "\n";
+  os << "===== Allocation by kind\n";
+  for (int i = 0; i < kNumAllocKinds; i++) {
+      os << alloc_names[i] << std::setw(10) << alloc_stats_[i] << "\n";
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
new file mode 100644
index 0000000..78d4614
--- /dev/null
+++ b/compiler/dex/arena_allocator.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_ARENA_ALLOCATOR_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_ARENA_ALLOCATOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+
+namespace art {
+
+#define ARENA_DEFAULT_BLOCK_SIZE (256 * 1024)
+#define ARENA_HIGH_WATER (16 * 1024)
+
+class ArenaAllocator {
+  public:
+
+    // Type of allocation for memory tuning.
+    enum ArenaAllocKind {
+      kAllocMisc,
+      kAllocBB,
+      kAllocLIR,
+      kAllocMIR,
+      kAllocDFInfo,
+      kAllocGrowableArray,
+      kAllocGrowableBitMap,
+      kAllocDalvikToSSAMap,
+      kAllocDebugInfo,
+      kAllocSuccessor,
+      kAllocRegAlloc,
+      kAllocData,
+      kAllocPredecessors,
+      kNumAllocKinds
+    };
+
+    ArenaAllocator(size_t default_size = ARENA_DEFAULT_BLOCK_SIZE);
+    ~ArenaAllocator();
+    void* NewMem(size_t size, bool zero, ArenaAllocKind kind);
+    size_t BytesAllocated() {
+      return malloc_bytes_;
+    }
+
+    void DumpMemStats(std::ostream& os) const;
+
+  private:
+
+    // Variable-length allocation block.
+    struct ArenaMemBlock {
+      size_t block_size;
+      size_t bytes_allocated;
+      ArenaMemBlock *next;
+      char ptr[0];
+    };
+
+    ArenaMemBlock* EmptyArenaBlock();
+
+    size_t default_size_;                    // Smallest size of new allocation block.
+    size_t block_size_;                      // Amount of allocatable bytes on a default block.
+    ArenaMemBlock* arena_head_;              // Head of linked list of allocation blocks.
+    ArenaMemBlock* current_block_;           // NOTE: code assumes there's always at least 1 block.
+    int num_arena_blocks_;
+    uint32_t malloc_bytes_;                  // Number of actual bytes malloc'd
+    uint32_t alloc_stats_[kNumAllocKinds];   // Bytes used by various allocation kinds.
+    uint32_t lost_bytes_;                    // Lost memory at end of too-small region
+    uint32_t num_allocations_;
+
+};  // ArenaAllocator
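+// Illustrative usage sketch, assuming a caller-defined placeholder type MyNode;
+// all blocks are reclaimed at once when the arena is destroyed:
+//
+//   ArenaAllocator arena;
+//   MyNode* node = static_cast<MyNode*>(
+//       arena.NewMem(sizeof(MyNode), true /* zero */, ArenaAllocator::kAllocMisc));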
+
+
+struct MemStats {
+  public:
+    void Dump(std::ostream& os) const {
+      arena_.DumpMemStats(os);
+    }
+    MemStats(const ArenaAllocator& arena) : arena_(arena) {}
+  private:
+    const ArenaAllocator& arena_;
+};  // MemStats
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_COMPILER_ARENA_ALLOCATOR_H_
diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc
new file mode 100644
index 0000000..1fbf774
--- /dev/null
+++ b/compiler/dex/arena_bit_vector.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dex_file-inl.h"
+
+namespace art {
+
+// TODO: profile to make sure this is still a win relative to just using shifted masks.
+static uint32_t check_masks[32] = {
+  0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+  0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+  0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+  0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+  0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+  0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+  0x40000000, 0x80000000 };
+
+ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
+                               bool expandable, OatBitMapKind kind)
+  :  arena_(arena),
+     expandable_(expandable),
+     kind_(kind),
+     storage_size_((start_bits + 31) >> 5),
+     storage_(static_cast<uint32_t*>(arena_->NewMem(storage_size_ * sizeof(uint32_t), true,
+                                                    ArenaAllocator::kAllocGrowableBitMap))) {
+  DCHECK_EQ(sizeof(storage_[0]), 4U);    // Assuming 32-bit units.
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool ArenaBitVector::IsBitSet(unsigned int num) {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+
+  unsigned int val = storage_[num >> 5] & check_masks[num & 0x1f];
+  return (val != 0);
+}
+
+// Mark all bits as "clear".
+void ArenaBitVector::ClearAllBits() {
+  memset(storage_, 0, storage_size_ * sizeof(uint32_t));
+}
+
+// Mark the specified bit as "set".
+/*
+ * TUNING: this could have pathologically bad growth/expand behavior.  Make sure we're
+ * not using it badly or change resize mechanism.
+ */
+void ArenaBitVector::SetBit(unsigned int num) {
+  if (num >= storage_size_ * sizeof(uint32_t) * 8) {
+    DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
+
+    /* Round up to word boundaries for "num+1" bits */
+    unsigned int new_size = (num + 1 + 31) >> 5;
+    DCHECK_GT(new_size, storage_size_);
+    uint32_t *new_storage =
+        static_cast<uint32_t*>(arena_->NewMem(new_size * sizeof(uint32_t), false,
+                                              ArenaAllocator::kAllocGrowableBitMap));
+    memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
+    // Zero out the new storage words.
+    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
+    // TODO: collect stats on space wasted because of resize.
+    storage_ = new_storage;
+    storage_size_ = new_size;
+  }
+
+  storage_[num >> 5] |= check_masks[num & 0x1f];
+}
+
+// Mark the specified bit as "unset".
+void ArenaBitVector::ClearBit(unsigned int num) {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+  storage_[num >> 5] &= ~check_masks[num & 0x1f];
+}
+
+// Copy a whole vector to the other. Sizes must match.
+void ArenaBitVector::Copy(ArenaBitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+}
+
+// Intersect with another bit vector.  Sizes and expandability must be the same.
+void ArenaBitVector::Intersect(const ArenaBitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  DCHECK_EQ(expandable_, src->IsExpandable());
+  for (unsigned int idx = 0; idx < storage_size_; idx++) {
+    storage_[idx] &= src->GetRawStorageWord(idx);
+  }
+}
+
+/*
+ * Union with another bit vector.  Sizes and expandability must be the same.
+ */
+void ArenaBitVector::Union(const ArenaBitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  DCHECK_EQ(expandable_, src->IsExpandable());
+  for (unsigned int idx = 0; idx < storage_size_; idx++) {
+    storage_[idx] |= src->GetRawStorageWord(idx);
+  }
+}
+
+// Count the number of bits that are set.
+int ArenaBitVector::NumSetBits() {
+  unsigned int count = 0;
+
+  for (unsigned int word = 0; word < storage_size_; word++) {
+    count += __builtin_popcount(storage_[word]);
+  }
+  return count;
+}
+
+/*
+ * Mark specified number of bits as "set". Cannot set all bits like ClearAll
+ * since there might be unused bits - setting those to one will confuse the
+ * iterator.
+ */
+void ArenaBitVector::SetInitialBits(unsigned int num_bits) {
+  DCHECK_LE(((num_bits + 31) >> 5), storage_size_);
+  unsigned int idx;
+  for (idx = 0; idx < (num_bits >> 5); idx++) {
+    storage_[idx] = -1;
+  }
+  unsigned int rem_num_bits = num_bits & 0x1f;
+  if (rem_num_bits) {
+    storage_[idx] = (1 << rem_num_bits) - 1;
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
new file mode 100644
index 0000000..a950e82
--- /dev/null
+++ b/compiler/dex/arena_bit_vector.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_ARENA_BIT_VECTOR_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_ARENA_BIT_VECTOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+#include "arena_allocator.h"
+
+namespace art {
+
+/*
+ * Expanding bitmap, used for tracking resources.  Bits are numbered starting
+ * from zero.  All operations on a BitVector are unsynchronized.
+ */
+class ArenaBitVector {
+  public:
+
+    class Iterator {
+      public:
+        Iterator(ArenaBitVector* bit_vector)
+          : p_bits_(bit_vector),
+            bit_storage_(bit_vector->GetRawStorage()),
+            bit_index_(0),
+            bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {};
+
+        // Return the position of the next set bit.  -1 means end-of-element reached.
+        int Next() {
+          // Did anything obviously change since we started?
+          DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
+          DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
+
+          if (bit_index_ >= bit_size_) return -1;
+
+          uint32_t word_index = bit_index_ / 32;
+          uint32_t word = bit_storage_[word_index];
+          // Mask out any bits in the first word we've already considered.
+          word >>= bit_index_ & 0x1f;
+          if (word == 0) {
+            bit_index_ &= ~0x1f;
+            do {
+              word_index++;
+              if ((word_index * 32) >= bit_size_) {
+                bit_index_ = bit_size_;
+                return -1;
+              }
+              word = bit_storage_[word_index];
+              bit_index_ += 32;
+            } while (word == 0);
+          }
+          bit_index_ += CTZ(word) + 1;
+          return bit_index_ - 1;
+        }
+
+        static void* operator new(size_t size, ArenaAllocator* arena) {
+          return arena->NewMem(sizeof(ArenaBitVector::Iterator), true,
+                               ArenaAllocator::kAllocGrowableBitMap);
+        };
+        static void operator delete(void* p) {};  // Nop.
+
+      private:
+        ArenaBitVector* const p_bits_;
+        uint32_t* const bit_storage_;
+        uint32_t bit_index_;            // Current index (size in bits).
+        const uint32_t bit_size_;       // Size of vector in bits.
+    };
+
+    ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits, bool expandable,
+                   OatBitMapKind kind = kBitMapMisc);
+    ~ArenaBitVector() {};
+
+    static void* operator new(size_t size, ArenaAllocator* arena) {
+      return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap);
+    }
+    static void operator delete(void* p) {};  // Nop.
+
+    void SetBit(unsigned int num);
+    void ClearBit(unsigned int num);
+    void MarkAllBits(bool set);
+    void DebugBitVector(char* msg, int length);
+    bool IsBitSet(unsigned int num);
+    void ClearAllBits();
+    void SetInitialBits(unsigned int num_bits);
+    void Copy(ArenaBitVector* src);
+    void Intersect(const ArenaBitVector* src2);
+    void Union(const ArenaBitVector* src);
+    // Are we equal to another bit vector?  Note: expandability attributes must also match.
+    bool Equal(const ArenaBitVector* src) {
+      return (storage_size_ == src->GetStorageSize()) &&
+        (expandable_ == src->IsExpandable()) &&
+        (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0);
+    }
+    int NumSetBits();
+
+    uint32_t GetStorageSize() const { return storage_size_; }
+    bool IsExpandable() const { return expandable_; }
+    uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
+    uint32_t* GetRawStorage() { return storage_; }
+    const uint32_t* GetRawStorage() const { return storage_; }
+
+  private:
+    ArenaAllocator* const arena_;
+    const bool expandable_;         // expand bitmap if we run out?
+    const OatBitMapKind kind_;      // for memory use tuning.
+    uint32_t   storage_size_;       // current size, in 32-bit words.
+    uint32_t*  storage_;
+};
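+// Illustrative usage sketch (assumes an existing ArenaAllocator* 'arena' and a
+// bit count 'num_bits'; both are placeholders).  Iterator::Next() yields the
+// index of each set bit and -1 once the vector is exhausted:
+//
+//   ArenaBitVector* bv = new (arena) ArenaBitVector(arena, num_bits, true /* expandable */);
+//   bv->SetBit(3);
+//   ArenaBitVector::Iterator it(bv);
+//   for (int idx = it.Next(); idx != -1; idx = it.Next()) {
+//     // Visits each set bit; here only bit 3.
+//   }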
+
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_COMPILER_ARENA_BIT_VECTOR_H_
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
new file mode 100644
index 0000000..45a1531
--- /dev/null
+++ b/compiler/dex/backend.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_BACKEND_H_
+#define ART_SRC_COMPILER_DEX_BACKEND_H_
+
+#include "compiled_method.h"
+#include "arena_allocator.h"
+
+namespace art {
+
+class Backend {
+
+  public:
+    virtual ~Backend() {};
+    virtual void Materialize() = 0;
+    virtual CompiledMethod* GetCompiledMethod() = 0;
+
+  protected:
+    Backend(ArenaAllocator* arena) : arena_(arena) {};
+    ArenaAllocator* const arena_;
+
+};  // Class Backend
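+// Illustrative sketch of how a concrete code generator could plug into this
+// interface (MyCodeGen and its members are placeholders, not part of this change):
+//
+//   class MyCodeGen : public Backend {
+//     public:
+//       explicit MyCodeGen(ArenaAllocator* arena) : Backend(arena), compiled_method_(NULL) {}
+//       void Materialize() { /* lower the IR and assemble the method */ }
+//       CompiledMethod* GetCompiledMethod() { return compiled_method_; }
+//     private:
+//       CompiledMethod* compiled_method_;
+//   };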
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_BACKEND_H_
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
new file mode 100644
index 0000000..bc456b2
--- /dev/null
+++ b/compiler/dex/compiler_enums.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILERENUMS_H_
+#define ART_SRC_COMPILER_DEX_COMPILERENUMS_H_
+
+#include "dex_instruction.h"
+
+namespace art {
+
+enum RegisterClass {
+  kCoreReg,
+  kFPReg,
+  kAnyReg,
+};
+
+enum SpecialTargetRegister {
+  kSelf,            // Thread pointer.
+  kSuspend,         // Used to reduce suspend checks for some targets.
+  kLr,
+  kPc,
+  kSp,
+  kArg0,
+  kArg1,
+  kArg2,
+  kArg3,
+  kFArg0,
+  kFArg1,
+  kFArg2,
+  kFArg3,
+  kRet0,
+  kRet1,
+  kInvokeTgt,
+  kCount
+};
+
+enum RegLocationType {
+  kLocDalvikFrame = 0, // Normal Dalvik register
+  kLocPhysReg,
+  kLocCompilerTemp,
+  kLocInvalid
+};
+
+enum BBType {
+  kEntryBlock,
+  kDalvikByteCode,
+  kExitBlock,
+  kExceptionHandling,
+  kDead,
+};
+
+/*
+ * Def/Use encoding in 64-bit use_mask/def_mask.  Low positions used for target-specific
+ * registers (and typically use the register number as the position).  High positions
+ * reserved for common and abstract resources.
+ */
+
+enum ResourceEncodingPos {
+  kMustNotAlias = 63,
+  kHeapRef = 62,          // Default memory reference type.
+  kLiteral = 61,          // Literal pool memory reference.
+  kDalvikReg = 60,        // Dalvik v_reg memory reference.
+  kFPStatus = 59,
+  kCCode = 58,
+  kLowestCommonResource = kCCode
+};
+
+// Shared pseudo opcodes - must be < 0.
+enum LIRPseudoOpcode {
+  kPseudoExportedPC = -16,
+  kPseudoSafepointPC = -15,
+  kPseudoIntrinsicRetry = -14,
+  kPseudoSuspendTarget = -13,
+  kPseudoThrowTarget = -12,
+  kPseudoCaseLabel = -11,
+  kPseudoMethodEntry = -10,
+  kPseudoMethodExit = -9,
+  kPseudoBarrier = -8,
+  kPseudoEntryBlock = -7,
+  kPseudoExitBlock = -6,
+  kPseudoTargetLabel = -5,
+  kPseudoDalvikByteCodeBoundary = -4,
+  kPseudoPseudoAlign4 = -3,
+  kPseudoEHBlockLabel = -2,
+  kPseudoNormalBlockLabel = -1,
+};
+
+enum ExtendedMIROpcode {
+  kMirOpFirst = kNumPackedOpcodes,
+  kMirOpPhi = kMirOpFirst,
+  kMirOpCopy,
+  kMirOpFusedCmplFloat,
+  kMirOpFusedCmpgFloat,
+  kMirOpFusedCmplDouble,
+  kMirOpFusedCmpgDouble,
+  kMirOpFusedCmpLong,
+  kMirOpNop,
+  kMirOpNullCheck,
+  kMirOpRangeCheck,
+  kMirOpDivZeroCheck,
+  kMirOpCheck,
+  kMirOpCheckPart2,
+  kMirOpSelect,
+  kMirOpLast,
+};
+
+enum MIROptimizationFlagPositons {
+  kMIRIgnoreNullCheck = 0,
+  kMIRNullCheckOnly,
+  kMIRIgnoreRangeCheck,
+  kMIRRangeCheckOnly,
+  kMIRInlined,                        // Invoke is inlined (i.e. dead).
+  kMIRInlinedPred,                    // Invoke is inlined via prediction.
+  kMIRCallee,                         // Instruction is inlined from callee.
+  kMIRIgnoreSuspendCheck,
+  kMIRDup,
+  kMIRMark,                           // Temporary node mark.
+};
+
+// For successor_block_list.
+enum BlockListType {
+  kNotUsed = 0,
+  kCatch,
+  kPackedSwitch,
+  kSparseSwitch,
+};
+
+enum AssemblerStatus {
+  kSuccess,
+  kRetryAll,
+};
+
+enum OpSize {
+  kWord,
+  kLong,
+  kSingle,
+  kDouble,
+  kUnsignedHalf,
+  kSignedHalf,
+  kUnsignedByte,
+  kSignedByte,
+};
+
+std::ostream& operator<<(std::ostream& os, const OpSize& kind);
+
+enum OpKind {
+  kOpMov,
+  kOpMvn,
+  kOpCmp,
+  kOpLsl,
+  kOpLsr,
+  kOpAsr,
+  kOpRor,
+  kOpNot,
+  kOpAnd,
+  kOpOr,
+  kOpXor,
+  kOpNeg,
+  kOpAdd,
+  kOpAdc,
+  kOpSub,
+  kOpSbc,
+  kOpRsub,
+  kOpMul,
+  kOpDiv,
+  kOpRem,
+  kOpBic,
+  kOpCmn,
+  kOpTst,
+  kOpBkpt,
+  kOpBlx,
+  kOpPush,
+  kOpPop,
+  kOp2Char,
+  kOp2Short,
+  kOp2Byte,
+  kOpCondBr,
+  kOpUncondBr,
+  kOpBx,
+  kOpInvalid,
+};
+
+std::ostream& operator<<(std::ostream& os, const OpKind& kind);
+
+enum ConditionCode {
+  kCondEq,  // equal
+  kCondNe,  // not equal
+  kCondCs,  // carry set (unsigned less than)
+  kCondUlt = kCondCs,
+  kCondCc,  // carry clear (unsigned greater than or same)
+  kCondUge = kCondCc,
+  kCondMi,  // minus
+  kCondPl,  // plus, positive or zero
+  kCondVs,  // overflow
+  kCondVc,  // no overflow
+  kCondHi,  // unsigned greater than
+  kCondLs,  // unsigned lower or same
+  kCondGe,  // signed greater than or equal
+  kCondLt,  // signed less than
+  kCondGt,  // signed greater than
+  kCondLe,  // signed less than or equal
+  kCondAl,  // always
+  kCondNv,  // never
+};
+
+std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
+
+// Target specific condition encodings
+enum ArmConditionCode {
+  kArmCondEq = 0x0,  // 0000
+  kArmCondNe = 0x1,  // 0001
+  kArmCondCs = 0x2,  // 0010
+  kArmCondCc = 0x3,  // 0011
+  kArmCondMi = 0x4,  // 0100
+  kArmCondPl = 0x5,  // 0101
+  kArmCondVs = 0x6,  // 0110
+  kArmCondVc = 0x7,  // 0111
+  kArmCondHi = 0x8,  // 1000
+  kArmCondLs = 0x9,  // 1001
+  kArmCondGe = 0xa,  // 1010
+  kArmCondLt = 0xb,  // 1011
+  kArmCondGt = 0xc,  // 1100
+  kArmCondLe = 0xd,  // 1101
+  kArmCondAl = 0xe,  // 1110
+  kArmCondNv = 0xf,  // 1111
+};
+
+std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
+
+enum X86ConditionCode {
+  kX86CondO   = 0x0,    // overflow
+  kX86CondNo  = 0x1,    // not overflow
+
+  kX86CondB   = 0x2,    // below
+  kX86CondNae = kX86CondB,  // not-above-equal
+  kX86CondC   = kX86CondB,  // carry
+
+  kX86CondNb  = 0x3,    // not-below
+  kX86CondAe  = kX86CondNb, // above-equal
+  kX86CondNc  = kX86CondNb, // not-carry
+
+  kX86CondZ   = 0x4,    // zero
+  kX86CondEq  = kX86CondZ,  // equal
+
+  kX86CondNz  = 0x5,    // not-zero
+  kX86CondNe  = kX86CondNz, // not-equal
+
+  kX86CondBe  = 0x6,    // below-equal
+  kX86CondNa  = kX86CondBe, // not-above
+
+  kX86CondNbe = 0x7,    // not-below-equal
+  kX86CondA   = kX86CondNbe,  // above
+
+  kX86CondS   = 0x8,    // sign
+  kX86CondNs  = 0x9,    // not-sign
+
+  kX86CondP   = 0xa,    // 8-bit parity even
+  kX86CondPE  = kX86CondP,
+
+  kX86CondNp  = 0xb,    // 8-bit parity odd
+  kX86CondPo  = kX86CondNp,
+
+  kX86CondL   = 0xc,    // less-than
+  kX86CondNge = kX86CondL,  // not-greater-equal
+
+  kX86CondNl  = 0xd,    // not-less-than
+  kX86CondGe  = kX86CondNl,  // greater-equal
+
+  kX86CondLe  = 0xe,    // less-than-equal
+  kX86CondNg  = kX86CondLe, // not-greater
+
+  kX86CondNle = 0xf,    // not-less-than-equal
+  kX86CondG   = kX86CondNle,  // greater
+};
+
+std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
+
+enum ThrowKind {
+  kThrowNullPointer,
+  kThrowDivZero,
+  kThrowArrayBounds,
+  kThrowConstantArrayBounds,
+  kThrowNoSuchMethod,
+  kThrowStackOverflow,
+};
+
+enum SpecialCaseHandler {
+  kNoHandler,
+  kNullMethod,
+  kConstFunction,
+  kIGet,
+  kIGetBoolean,
+  kIGetObject,
+  kIGetByte,
+  kIGetChar,
+  kIGetShort,
+  kIGetWide,
+  kIPut,
+  kIPutBoolean,
+  kIPutObject,
+  kIPutByte,
+  kIPutChar,
+  kIPutShort,
+  kIPutWide,
+  kIdentity,
+};
+
+enum DividePattern {
+  DivideNone,
+  Divide3,
+  Divide5,
+  Divide7,
+};
+
+std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
+
+// Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
+enum MemBarrierKind {
+  kLoadStore,
+  kLoadLoad,
+  kStoreStore,
+  kStoreLoad
+};
+
+std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
+
+enum OpFeatureFlags {
+  kIsBranch = 0,
+  kNoOperand,
+  kIsUnaryOp,
+  kIsBinaryOp,
+  kIsTertiaryOp,
+  kIsQuadOp,
+  kIsQuinOp,
+  kIsSextupleOp,
+  kIsIT,
+  kMemLoad,
+  kMemStore,
+  kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
+  kRegDef0,
+  kRegDef1,
+  kRegDefA,
+  kRegDefD,
+  kRegDefFPCSList0,
+  kRegDefFPCSList2,
+  kRegDefList0,
+  kRegDefList1,
+  kRegDefList2,
+  kRegDefLR,
+  kRegDefSP,
+  kRegUse0,
+  kRegUse1,
+  kRegUse2,
+  kRegUse3,
+  kRegUse4,
+  kRegUseA,
+  kRegUseC,
+  kRegUseD,
+  kRegUseFPCSList0,
+  kRegUseFPCSList2,
+  kRegUseList0,
+  kRegUseList1,
+  kRegUseLR,
+  kRegUsePC,
+  kRegUseSP,
+  kSetsCCodes,
+  kUsesCCodes
+};
+
+enum SelectInstructionKind {
+  kSelectNone,
+  kSelectConst,
+  kSelectMove,
+  kSelectGoto
+};
+
+std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
+
+// Type of growable bitmap for memory tuning.
+enum OatBitMapKind {
+  kBitMapMisc = 0,
+  kBitMapUse,
+  kBitMapDef,
+  kBitMapLiveIn,
+  kBitMapBMatrix,
+  kBitMapDominators,
+  kBitMapIDominated,
+  kBitMapDomFrontier,
+  kBitMapPhi,
+  kBitMapTmpBlocks,
+  kBitMapInputBlocks,
+  kBitMapRegisterV,
+  kBitMapTempSSARegisterV,
+  kBitMapNullCheck,
+  kBitMapTmpBlockV,
+  kBitMapPredecessors,
+  kNumBitMapKinds
+};
+
+std::ostream& operator<<(std::ostream& os, const OatBitMapKind& kind);
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_COMPILERENUMS_H_
diff --git a/compiler/dex/compiler_internals.h b/compiler/dex/compiler_internals.h
new file mode 100644
index 0000000..a3fa25e
--- /dev/null
+++ b/compiler/dex/compiler_internals.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_INTERNAL_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_INTERNAL_H_
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "base/logging.h"
+#include "class_linker.h"
+#include "driver/compiler_driver.h"
+#include "quick/mir_to_lir.h"
+#include "mir_graph.h"
+#include "compiler_ir.h"
+#include "frontend.h"
+#include "monitor.h"
+#include "thread.h"
+#include "utils.h"
+
+#endif  // ART_SRC_COMPILER_DEX_COMPILER_INTERNAL_H_
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
new file mode 100644
index 0000000..c6f99f3
--- /dev/null
+++ b/compiler/dex/compiler_ir.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_IR_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_IR_H_
+
+#include <vector>
+#include <llvm/IR/Module.h>
+#include "arena_allocator.h"
+#include "backend.h"
+#include "compiler_enums.h"
+#include "dex/quick/mir_to_lir.h"
+#include "dex_instruction.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "llvm/intrinsic_helper.h"
+#include "llvm/ir_builder.h"
+#include "safe_map.h"
+
+namespace art {
+
+class LLVMInfo;
+namespace llvm {
+class LlvmCompilationUnit;
+}  // namespace llvm
+
+struct ArenaMemBlock;
+struct Memstats;
+class MIRGraph;
+class Mir2Lir;
+
+struct CompilationUnit {
+  CompilationUnit()
+    : compiler_driver(NULL),
+      class_linker(NULL),
+      dex_file(NULL),
+      class_loader(NULL),
+      class_def_idx(0),
+      method_idx(0),
+      code_item(NULL),
+      access_flags(0),
+      invoke_type(kDirect),
+      shorty(NULL),
+      disable_opt(0),
+      enable_debug(0),
+      verbose(false),
+      compiler_backend(kNoBackend),
+      instruction_set(kNone),
+      num_dalvik_registers(0),
+      insns(NULL),
+      num_ins(0),
+      num_outs(0),
+      num_regs(0),
+      num_compiler_temps(0),
+      compiler_flip_match(false),
+      mir_graph(NULL),
+      cg(NULL) {}
+  /*
+   * Fields needed/generated by common frontend and generally used throughout
+   * the compiler.
+  */
+  CompilerDriver* compiler_driver;
+  ClassLinker* class_linker;           // Linker to resolve fields and methods.
+  const DexFile* dex_file;             // DexFile containing the method being compiled.
+  jobject class_loader;                // compiling method's class loader.
+  uint32_t class_def_idx;              // compiling method's defining class definition index.
+  uint32_t method_idx;                 // compiling method's index into method_ids of DexFile.
+  const DexFile::CodeItem* code_item;  // compiling method's DexFile code_item.
+  uint32_t access_flags;               // compiling method's access flags.
+  InvokeType invoke_type;              // compiling method's invocation type.
+  const char* shorty;                  // compiling method's shorty.
+  uint32_t disable_opt;                // opt_control_vector flags.
+  uint32_t enable_debug;               // debugControlVector flags.
+  bool verbose;
+  CompilerBackend compiler_backend;
+  InstructionSet instruction_set;
+
+  // TODO: much of this info available elsewhere.  Go to the original source?
+  int num_dalvik_registers;        // method->registers_size.
+  const uint16_t* insns;
+  int num_ins;
+  int num_outs;
+  int num_regs;            // Unlike num_dalvik_registers, does not include ins.
+
+  // TODO: may want to move this to MIRGraph.
+  int num_compiler_temps;
+
+  // If non-empty, apply optimizer/debug flags only to matching methods.
+  std::string compiler_method_match;
+  // Flips the sense of compiler_method_match - apply the flags if the method does not match.
+  bool compiler_flip_match;
+
+  // TODO: move memory management to mir_graph, or just switch to using standard containers.
+  ArenaAllocator arena;
+
+  UniquePtr<MIRGraph> mir_graph;   // MIR container.
+  UniquePtr<Backend> cg;           // Target-specific codegen.
+};
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_COMPILER_IR_H_
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
new file mode 100644
index 0000000..b20004d
--- /dev/null
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
+#define ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
+
+#include "dataflow_iterator.h"
+
+namespace art {
+
+inline BasicBlock* DataflowIterator::NextBody(bool had_change) {
+  changed_ |= had_change;
+  BasicBlock* res = NULL;
+  if (reverse_) {
+    if (is_iterative_ && changed_ && (idx_ < 0)) {
+      idx_ = start_idx_;
+      changed_ = false;
+    }
+    if (idx_ >= 0) {
+      int bb_id = block_id_list_->Get(idx_--);
+      res = mir_graph_->GetBasicBlock(bb_id);
+    }
+  } else {
+    if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
+      idx_ = start_idx_;
+      changed_ = false;
+    }
+    if (idx_ < end_idx_) {
+      int bb_id = block_id_list_->Get(idx_++);
+      res = mir_graph_->GetBasicBlock(bb_id);
+    }
+  }
+  return res;
+}
+
+// AllNodes uses the existing GrowableArray iterator, so use different NextBody().
+inline BasicBlock* AllNodesIterator::NextBody(bool had_change) {
+  changed_ |= had_change;
+  BasicBlock* res = NULL;
+  bool keep_looking = true;
+  while (keep_looking) {
+    res = all_nodes_iterator_->Next();
+    if (is_iterative_ && changed_ && (res == NULL)) {
+      all_nodes_iterator_->Reset();
+      changed_ = false;
+    } else if ((res == NULL) || (!res->hidden)) {
+      keep_looking = false;
+    }
+  }
+  return res;
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
new file mode 100644
index 0000000..12cbf9c
--- /dev/null
+++ b/compiler/dex/dataflow_iterator.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_H_
+#define ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_H_
+
+#include "compiler_ir.h"
+#include "mir_graph.h"
+
+namespace art {
+
+  /*
+   * This class supports iterating over lists of basic blocks in various
+   * interesting orders.  Note that for efficiency, the visit orders have been pre-computed.
+   * The order itself will not change during the iteration.  However, for some uses,
+   * auxiliary data associated with the basic blocks may be changed during the iteration,
+   * necessitating another pass over the list.
+   *
+   * To support this usage, we have is_iterative_.  If false, the iteration is a one-shot
+   * pass through the pre-computed list using Next().  If true, the caller must tell the
+   * iterator whether a change has been made that necessitates another pass.  Use
+   * Next(had_change) for this.  The general idea is that the iterative_ use case means
+   * that the iterator will keep repeating the full basic block list until a complete pass
+   * is made through it with no changes.  Note that calling Next(true) does not affect
+   * the iteration order or short-circuit the current pass - it simply tells the iterator
+   * that once it has finished walking through the block list it should reset and do another
+   * full pass through the list.  An illustrative usage sketch follows the class declaration.
+   */
+  class DataflowIterator {
+    public:
+
+      virtual ~DataflowIterator() {}
+
+      // Return the next BasicBlock* to visit.
+      BasicBlock* Next() {
+        DCHECK(!is_iterative_);
+        return NextBody(false);
+      }
+
+      /*
+       * Return the next BasicBlock* to visit, and tell the iterator whether any change
+       * has occurred that requires another full pass over the block list.
+       */
+      BasicBlock* Next(bool had_change) {
+        DCHECK(is_iterative_);
+        return NextBody(had_change);
+      }
+
+    protected:
+      DataflowIterator(MIRGraph* mir_graph, bool is_iterative, int start_idx, int end_idx,
+                       bool reverse)
+          : mir_graph_(mir_graph),
+            is_iterative_(is_iterative),
+            start_idx_(start_idx),
+            end_idx_(end_idx),
+            reverse_(reverse),
+            block_id_list_(NULL),
+            idx_(0),
+            changed_(false) {}
+
+      virtual BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+
+      MIRGraph* const mir_graph_;
+      const bool is_iterative_;
+      const int start_idx_;
+      const int end_idx_;
+      const bool reverse_;
+      GrowableArray<int>* block_id_list_;
+      int idx_;
+      bool changed_;
+
+  }; // DataflowIterator
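+  // Illustrative usage sketch of the iterative mode described above, assuming a
+  // MIRGraph* 'mir_graph' and a placeholder per-block pass DoPass() that returns
+  // whether it changed anything:
+  //
+  //   PreOrderDfsIterator iter(mir_graph, true /* iterative */);
+  //   bool change = false;
+  //   for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+  //     change = DoPass(bb);
+  //   }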
+
+  class ReachableNodesIterator : public DataflowIterator {
+    public:
+      ReachableNodesIterator(MIRGraph* mir_graph, bool is_iterative)
+          : DataflowIterator(mir_graph, is_iterative, 0,
+                             mir_graph->GetNumReachableBlocks(), false) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDfsOrder();
+      }
+  };
+
+  class PreOrderDfsIterator : public DataflowIterator {
+    public:
+      PreOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
+          : DataflowIterator(mir_graph, is_iterative, 0,
+                             mir_graph->GetNumReachableBlocks(), false) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDfsOrder();
+      }
+  };
+
+  class PostOrderDfsIterator : public DataflowIterator {
+    public:
+
+      PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
+          : DataflowIterator(mir_graph, is_iterative, 0,
+                             mir_graph->GetNumReachableBlocks(), false) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDfsPostOrder();
+      }
+  };
+
+  class ReversePostOrderDfsIterator : public DataflowIterator {
+    public:
+      ReversePostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
+          : DataflowIterator(mir_graph, is_iterative,
+                             mir_graph->GetNumReachableBlocks() - 1, 0, true) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDfsPostOrder();
+      }
+  };
+
+  class PostOrderDOMIterator : public DataflowIterator {
+    public:
+      PostOrderDOMIterator(MIRGraph* mir_graph, bool is_iterative)
+          : DataflowIterator(mir_graph, is_iterative, 0,
+                             mir_graph->GetNumReachableBlocks(), false) {
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetDomPostOrder();
+      }
+  };
+
+  class AllNodesIterator : public DataflowIterator {
+    public:
+      AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
+          : DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
+        all_nodes_iterator_ =
+            new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
+      }
+
+      void Reset() {
+        all_nodes_iterator_->Reset();
+      }
+
+      BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+
+    private:
+      GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_;
+  };
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_H_
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
new file mode 100644
index 0000000..ee68a5d
--- /dev/null
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field-inl.h"
+
+namespace art {
+namespace optimizer {
+
+// Controls quickening activation.
+const bool kEnableQuickening = true;
+// Controls logging.
+const bool kEnableLogging = false;
+
+class DexCompiler {
+ public:
+  DexCompiler(art::CompilerDriver& compiler,
+              const DexCompilationUnit& unit)
+    : driver_(compiler),
+      unit_(unit) {};
+
+  ~DexCompiler() {};
+
+  void Compile();
+
+ private:
+  const DexFile& GetDexFile() const {
+    return *unit_.GetDexFile();
+  }
+
+  // TODO: since the whole compilation pipeline uses a "const DexFile", we need
+  // to "unconst" here. The DEX-to-DEX compiler should work on a non-const DexFile.
+  DexFile& GetModifiableDexFile() {
+    return *const_cast<DexFile*>(unit_.GetDexFile());
+  }
+
+  // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
+  // a barrier is required.
+  void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
+
+  // Compiles a field access into a quick field access.
+  // The field index is replaced by an offset within an Object where we can read
+  // from / write to this field. Therefore, this does not involve any resolution
+  // at runtime.
+  // Since the field index is encoded with 16 bits, we can replace it only if the
+  // field offset can be encoded with 16 bits too.
+  void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
+                                  Instruction::Code new_opcode, bool is_put);
+
+  // Compiles a virtual method invocation into a quick virtual method invocation.
+  // The method index is replaced by the vtable index where the corresponding
+  // AbstractMethod can be found. Therefore, this does not involve any resolution
+  // at runtime.
+  // Since the method index is encoded with 16 bits, we can replace it only if the
+  // vtable index can be encoded with 16 bits too.
+  void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
+                            Instruction::Code new_opcode, bool is_range);
+
+  CompilerDriver& driver_;
+  const DexCompilationUnit& unit_;
+
+  DISALLOW_COPY_AND_ASSIGN(DexCompiler);
+};
+
+// Ensures write access to a part of DEX file.
+//
+// If a DEX file is read-only, it modifies its protection (mprotect) so it allows
+// write access to the part of DEX file defined by an address and a length.
+// In this case, it also takes the DexFile::modification_lock to prevent from
+// concurrent protection modification from a parallel DEX-to-DEX compilation on
+// the same DEX file.
+// When the instance is destroyed, it recovers original protection and releases
+// the lock.
+// TODO: as this scoped class is similar to a MutexLock we should use annotalysis
+// to capture the locking behavior.
+class ScopedDexWriteAccess {
+ public:
+  ScopedDexWriteAccess(DexFile& dex_file, Instruction* inst,
+                       size_t length)
+    : dex_file_(dex_file),
+      address_(reinterpret_cast<uint8_t*>(inst)),
+      length_(length),
+      is_read_only_(dex_file_.IsReadOnly()) {
+    if (is_read_only_) {
+      // We need to enable DEX write access. To avoid concurrent DEX write access
+      // modification, we take the DexFile::modification_lock before.
+      dex_file_.GetModificationLock().ExclusiveLock(Thread::Current());
+      bool success = dex_file_.EnableWrite(address_, length_);
+      DCHECK(success) << "Failed to enable DEX write access";
+    }
+  }
+
+  ~ScopedDexWriteAccess() {
+    DCHECK_EQ(is_read_only_, dex_file_.IsReadOnly());
+    if (is_read_only_) {
+      bool success = dex_file_.DisableWrite(address_, length_);
+      DCHECK(success) << "Failed to disable DEX write access";
+      // Now we recovered original read-only protection, we can release the
+      // DexFile::modification_lock.
+      dex_file_.GetModificationLock().ExclusiveUnlock(Thread::Current());
+    }
+  }
+
+ private:
+  DexFile& dex_file_;
+  // TODO: make address_ const.
+  uint8_t* address_;
+  const size_t length_;
+  const bool is_read_only_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedDexWriteAccess);
+};
+
+void DexCompiler::Compile() {
+  const DexFile::CodeItem* code_item = unit_.GetCodeItem();
+  const uint16_t* insns = code_item->insns_;
+  const uint32_t insns_size = code_item->insns_size_in_code_units_;
+  Instruction* inst = const_cast<Instruction*>(Instruction::At(insns));
+
+  for (uint32_t dex_pc = 0; dex_pc < insns_size;
+       inst = const_cast<Instruction*>(inst->Next()), dex_pc = inst->GetDexPc(insns)) {
+    switch (inst->Opcode()) {
+      case Instruction::RETURN_VOID:
+        CompileReturnVoid(inst, dex_pc);
+        break;
+
+      case Instruction::IGET:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_QUICK, false);
+        break;
+
+      case Instruction::IGET_WIDE:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE_QUICK, false);
+        break;
+
+      case Instruction::IGET_OBJECT:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT_QUICK, false);
+        break;
+
+      case Instruction::IPUT:
+      case Instruction::IPUT_BOOLEAN:
+      case Instruction::IPUT_BYTE:
+      case Instruction::IPUT_CHAR:
+      case Instruction::IPUT_SHORT:
+        // These opcodes have the same implementation in the interpreter, so group
+        // them under IPUT_QUICK.
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
+        break;
+
+      case Instruction::IPUT_WIDE:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_WIDE_QUICK, true);
+        break;
+
+      case Instruction::IPUT_OBJECT:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_OBJECT_QUICK, true);
+        break;
+
+      case Instruction::INVOKE_VIRTUAL:
+        CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_QUICK, false);
+        break;
+
+      case Instruction::INVOKE_VIRTUAL_RANGE:
+        CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_RANGE_QUICK, true);
+        break;
+
+      default:
+        // Nothing to do.
+        break;
+    }
+  }
+}
+
+void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
+  DCHECK(inst->Opcode() == Instruction::RETURN_VOID);
+  // Are we compiling a constructor?
+  if ((unit_.GetAccessFlags() & kAccConstructor) == 0) {
+    return;
+  }
+  // Do we need a constructor barrier?
+  if (!driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
+                                         unit_.GetClassDefIndex())) {
+    return;
+  }
+  // Replace RETURN_VOID by RETURN_VOID_BARRIER.
+  if (kEnableLogging) {
+    LOG(INFO) << "Replacing " << Instruction::Name(inst->Opcode())
+    << " by " << Instruction::Name(Instruction::RETURN_VOID_BARRIER)
+    << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+    << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+  }
+  ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 2u);
+  inst->SetOpcode(Instruction::RETURN_VOID_BARRIER);
+}
+
+void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
+                                             uint32_t dex_pc,
+                                             Instruction::Code new_opcode,
+                                             bool is_put) {
+  if (!kEnableQuickening) {
+    return;
+  }
+  uint32_t field_idx = inst->VRegC_22c();
+  int field_offset;
+  bool is_volatile;
+  bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, field_offset,
+                                                    is_volatile, is_put);
+  if (fast_path && !is_volatile && IsUint(16, field_offset)) {
+    // TODO: use VLOG?
+    if (kEnableLogging) {
+      LOG(INFO) << "Quickening " << Instruction::Name(inst->Opcode())
+                << " to " << Instruction::Name(new_opcode)
+                << " by replacing field index " << field_idx
+                << " by field offset " << field_offset
+                << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+                << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+    }
+    // We are modifying 4 consecutive bytes.
+    ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 4u);
+    inst->SetOpcode(new_opcode);
+    // Replace field index by field offset.
+    inst->SetVRegC_22c(static_cast<uint16_t>(field_offset));
+  }
+}
+
+void DexCompiler::CompileInvokeVirtual(Instruction* inst,
+                                       uint32_t dex_pc,
+                                       Instruction::Code new_opcode,
+                                       bool is_range) {
+  if (!kEnableQuickening) {
+    return;
+  }
+  uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+  MethodReference target_method(&GetDexFile(), method_idx);
+  InvokeType invoke_type = kVirtual;
+  InvokeType original_invoke_type = invoke_type;
+  int vtable_idx;
+  uintptr_t direct_code;
+  uintptr_t direct_method;
+  bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc, invoke_type,
+                                             target_method, vtable_idx,
+                                             direct_code, direct_method,
+                                             false);
+  // TODO: support devirtualization.
+  if (fast_path && original_invoke_type == invoke_type) {
+    if (vtable_idx >= 0 && IsUint(16, vtable_idx)) {
+      // TODO: use VLOG?
+      if (kEnableLogging) {
+        LOG(INFO) << "Quickening " << Instruction::Name(inst->Opcode())
+                  << "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
+                  << " to " << Instruction::Name(new_opcode)
+                  << " by replacing method index " << method_idx
+                  << " by vtable index " << vtable_idx
+                  << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+                  << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+      }
+      // We are modifying 4 consecutive bytes.
+      ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 4u);
+      inst->SetOpcode(new_opcode);
+      // Replace method index by vtable index.
+      if (is_range) {
+        inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
+      } else {
+        inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
+      }
+    }
+  }
+}
+
+}  // namespace optimizer
+}  // namespace art
+
+extern "C" art::CompiledMethod*
+    ArtCompileDEX(art::CompilerDriver& compiler, const art::DexFile::CodeItem* code_item,
+                  uint32_t access_flags, art::InvokeType invoke_type,
+                  uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                  const art::DexFile& dex_file) {
+  art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+                               dex_file, code_item, class_def_idx, method_idx, access_flags);
+  art::optimizer::DexCompiler dex_compiler(compiler, unit);
+  dex_compiler.Compile();
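+  // DEX-to-DEX compilation rewrites the code item in place, so there is no separate
+  // CompiledMethod to return.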
+  return NULL;
+}
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
new file mode 100644
index 0000000..746d475
--- /dev/null
+++ b/compiler/dex/frontend.cc
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <llvm/Support/Threading.h>
+
+#include "compiler_internals.h"
+#include "driver/compiler_driver.h"
+#include "dataflow_iterator-inl.h"
+#include "leb128.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "backend.h"
+#include "base/logging.h"
+
+#if defined(ART_USE_PORTABLE_COMPILER)
+#include "dex/portable/mir_to_gbc.h"
+#include "llvm/llvm_compilation_unit.h"
+#endif
+
+namespace {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+  pthread_once_t llvm_multi_init = PTHREAD_ONCE_INIT;
+#endif
+  void InitializeLLVMForQuick() {
+    ::llvm::llvm_start_multithreaded();
+  }
+}
+
+namespace art {
+namespace llvm {
+::llvm::Module* makeLLVMModuleContents(::llvm::Module* module);
+}
+
+LLVMInfo::LLVMInfo() {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+  pthread_once(&llvm_multi_init, InitializeLLVMForQuick);
+#endif
+  // Create context, module, intrinsic helper & ir builder
+  llvm_context_.reset(new ::llvm::LLVMContext());
+  llvm_module_ = new ::llvm::Module("art", *llvm_context_);
+  ::llvm::StructType::create(*llvm_context_, "JavaObject");
+  art::llvm::makeLLVMModuleContents(llvm_module_);
+  intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
+  ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_));
+}
+
+LLVMInfo::~LLVMInfo() {
+}
+
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& compiler) {
+  CHECK(compiler.GetCompilerContext() == NULL);
+  LLVMInfo* llvm_info = new LLVMInfo();
+  compiler.SetCompilerContext(llvm_info);
+}
+
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& compiler) {
+  delete reinterpret_cast<LLVMInfo*>(compiler.GetCompilerContext());
+  compiler.SetCompilerContext(NULL);
+}
+
+/* Default optimizer/debug setting for the compiler. */
+static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
+  (1 << kLoadStoreElimination) |
+  //(1 << kLoadHoisting) |
+  //(1 << kSuppressLoads) |
+  //(1 << kNullCheckElimination) |
+  //(1 << kPromoteRegs) |
+  //(1 << kTrackLiveTemps) |
+  //(1 << kSafeOptimizations) |
+  //(1 << kBBOpt) |
+  //(1 << kMatch) |
+  //(1 << kPromoteCompilerTemps) |
+  0;
+
+static uint32_t kCompilerDebugFlags = 0 |     // Enable debug/testing modes
+  //(1 << kDebugDisplayMissingTargets) |
+  //(1 << kDebugVerbose) |
+  //(1 << kDebugDumpCFG) |
+  //(1 << kDebugSlowFieldPath) |
+  //(1 << kDebugSlowInvokePath) |
+  //(1 << kDebugSlowStringPath) |
+  //(1 << kDebugSlowestFieldPath) |
+  //(1 << kDebugSlowestStringPath) |
+  //(1 << kDebugExerciseResolveMethod) |
+  //(1 << kDebugVerifyDataflow) |
+  //(1 << kDebugShowMemoryUsage) |
+  //(1 << kDebugShowNops) |
+  //(1 << kDebugCountOpcodes) |
+  //(1 << kDebugDumpCheckStats) |
+  //(1 << kDebugDumpBitcodeFile) |
+  //(1 << kDebugVerifyBitcode) |
+  //(1 << kDebugShowSummaryMemoryUsage) |
+  0;
+
+static CompiledMethod* CompileMethod(CompilerDriver& compiler,
+                                     const CompilerBackend compiler_backend,
+                                     const DexFile::CodeItem* code_item,
+                                     uint32_t access_flags, InvokeType invoke_type,
+                                     uint32_t class_def_idx, uint32_t method_idx,
+                                     jobject class_loader, const DexFile& dex_file
+#if defined(ART_USE_PORTABLE_COMPILER)
+                                     , llvm::LlvmCompilationUnit* llvm_compilation_unit
+#endif
+) {
+  VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
+
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  UniquePtr<CompilationUnit> cu(new CompilationUnit);
+
+  cu->compiler_driver = &compiler;
+  cu->class_linker = class_linker;
+  cu->instruction_set = compiler.GetInstructionSet();
+  cu->compiler_backend = compiler_backend;
+  DCHECK((cu->instruction_set == kThumb2) ||
+         (cu->instruction_set == kX86) ||
+         (cu->instruction_set == kMips));
+
+  /* Adjust this value accordingly once inlining is performed */
+  cu->num_dalvik_registers = code_item->registers_size_;
+  // TODO: set this from command line
+  cu->compiler_flip_match = false;
+  bool use_match = !cu->compiler_method_match.empty();
+  bool match = use_match && (cu->compiler_flip_match ^
+      (PrettyMethod(method_idx, dex_file).find(cu->compiler_method_match) !=
+       std::string::npos));
+  if (!use_match || match) {
+    cu->disable_opt = kCompilerOptimizerDisableFlags;
+    cu->enable_debug = kCompilerDebugFlags;
+    cu->verbose = VLOG_IS_ON(compiler) ||
+        (cu->enable_debug & (1 << kDebugVerbose));
+  }
+
+  /*
+   * TODO: rework handling of optimization and debug flags.  Should we split out
+   * MIR and backend flags?  Need command-line setting as well.
+   */
+
+  if (compiler_backend == kPortable) {
+    // Fused long branches not currently useful in bitcode.
+    cu->disable_opt |= (1 << kBranchFusing);
+  }
+
+  if (cu->instruction_set == kMips) {
+    // Disable some optimizations for mips for now
+    cu->disable_opt |= (
+        (1 << kLoadStoreElimination) |
+        (1 << kLoadHoisting) |
+        (1 << kSuppressLoads) |
+        (1 << kNullCheckElimination) |
+        (1 << kPromoteRegs) |
+        (1 << kTrackLiveTemps) |
+        (1 << kSafeOptimizations) |
+        (1 << kBBOpt) |
+        (1 << kMatch) |
+        (1 << kPromoteCompilerTemps));
+  }
+
+  cu->mir_graph.reset(new MIRGraph(cu.get(), &cu->arena));
+
+  /* Gathering opcode stats? */
+  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
+    cu->mir_graph->EnableOpcodeCounting();
+  }
+
+  /* Build the raw MIR graph */
+  cu->mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+                              class_loader, dex_file);
+
+  /* Do a code layout pass */
+  cu->mir_graph->CodeLayout();
+
+  /* Perform SSA transformation for the whole method */
+  cu->mir_graph->SSATransformation();
+
+  /* Do constant propagation */
+  cu->mir_graph->PropagateConstants();
+
+  /* Count uses */
+  cu->mir_graph->MethodUseCount();
+
+  /* Perform null check elimination */
+  cu->mir_graph->NullCheckElimination();
+
+  /* Combine basic blocks where possible */
+  cu->mir_graph->BasicBlockCombine();
+
+  /* Do some basic block optimizations */
+  cu->mir_graph->BasicBlockOptimization();
+
+  if (cu->enable_debug & (1 << kDebugDumpCheckStats)) {
+    cu->mir_graph->DumpCheckStats();
+  }
+
+  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
+    cu->mir_graph->ShowOpcodeStats();
+  }
+
+  /* Set up regLocation[] array to describe values - one for each ssa_name. */
+  cu->mir_graph->BuildRegLocations();
+
+  CompiledMethod* result = NULL;
+
+#if defined(ART_USE_PORTABLE_COMPILER)
+  if (compiler_backend == kPortable) {
+    cu->cg.reset(PortableCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena,
+                                       llvm_compilation_unit));
+  } else
+#endif
+  {
+    switch (compiler.GetInstructionSet()) {
+      case kThumb2:
+        cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break;
+      case kMips:
+        cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break;
+      case kX86:
+        cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break;
+      default:
+        LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
+    }
+  }
+
+  cu->cg->Materialize();
+
+  result = cu->cg->GetCompiledMethod();
+
+  if (result) {
+    VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file);
+  } else {
+    VLOG(compiler) << "Deferred " << PrettyMethod(method_idx, dex_file);
+  }
+
+  if (cu->enable_debug & (1 << kDebugShowMemoryUsage)) {
+    if (cu->arena.BytesAllocated() > (5 * 1024 * 1024)) {
+      MemStats mem_stats(cu->arena);
+      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+    }
+  }
+
+  if (cu->enable_debug & (1 << kDebugShowSummaryMemoryUsage)) {
+    LOG(INFO) << "MEMINFO " << cu->arena.BytesAllocated() << " " << cu->mir_graph->GetNumBlocks()
+              << " " << PrettyMethod(method_idx, dex_file);
+  }
+
+  return result;
+}
+
+CompiledMethod* CompileOneMethod(CompilerDriver& compiler,
+                                 const CompilerBackend backend,
+                                 const DexFile::CodeItem* code_item,
+                                 uint32_t access_flags,
+                                 InvokeType invoke_type,
+                                 uint32_t class_def_idx,
+                                 uint32_t method_idx,
+                                 jobject class_loader,
+                                 const DexFile& dex_file,
+                                 llvm::LlvmCompilationUnit* llvm_compilation_unit) {
+  return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
+                       method_idx, class_loader, dex_file
+#if defined(ART_USE_PORTABLE_COMPILER)
+                       , llvm_compilation_unit
+#endif
+                       );
+}
+
+}  // namespace art
+
+extern "C" art::CompiledMethod*
+    ArtQuickCompileMethod(art::CompilerDriver& compiler,
+                          const art::DexFile::CodeItem* code_item,
+                          uint32_t access_flags, art::InvokeType invoke_type,
+                          uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                          const art::DexFile& dex_file) {
+  // TODO: check method fingerprint here to determine appropriate backend type.  Until then, use build default
+  art::CompilerBackend backend = compiler.GetCompilerBackend();
+  return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
+                               class_def_idx, method_idx, class_loader, dex_file,
+                               NULL /* use thread llvm_info */);
+}
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
new file mode 100644
index 0000000..69d7f77
--- /dev/null
+++ b/compiler/dex/frontend.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_H_
+
+#include "dex_file.h"
+#include "dex_instruction.h"
+
+
+namespace llvm {
+  class Module;
+  class LLVMContext;
+}
+
+namespace art {
+namespace llvm {
+  class IntrinsicHelper;
+  class IRBuilder;
+}
+
+/*
+ * Assembly is an iterative process, and usually terminates within
+ * two or three passes.  This should be high enough to handle bizarre
+ * cases, but detect an infinite loop bug.
+ */
+#define MAX_ASSEMBLER_RETRIES 50
+
+// Suppress optimization if corresponding bit set.
+enum opt_control_vector {
+  kLoadStoreElimination = 0,
+  kLoadHoisting,
+  kSuppressLoads,
+  kNullCheckElimination,
+  kPromoteRegs,
+  kTrackLiveTemps,
+  kSafeOptimizations,
+  kBBOpt,
+  kMatch,
+  kPromoteCompilerTemps,
+  kBranchFusing,
+};
+
+// Force code generation paths for testing.
+enum debugControlVector {
+  kDebugVerbose,
+  kDebugDumpCFG,
+  kDebugSlowFieldPath,
+  kDebugSlowInvokePath,
+  kDebugSlowStringPath,
+  kDebugSlowTypePath,
+  kDebugSlowestFieldPath,
+  kDebugSlowestStringPath,
+  kDebugExerciseResolveMethod,
+  kDebugVerifyDataflow,
+  kDebugShowMemoryUsage,
+  kDebugShowNops,
+  kDebugCountOpcodes,
+  kDebugDumpCheckStats,
+  kDebugDumpBitcodeFile,
+  kDebugVerifyBitcode,
+  kDebugShowSummaryMemoryUsage,
+};
+
+class LLVMInfo {
+  public:
+    LLVMInfo();
+    ~LLVMInfo();
+
+    ::llvm::LLVMContext* GetLLVMContext() {
+      return llvm_context_.get();
+    }
+
+    ::llvm::Module* GetLLVMModule() {
+      return llvm_module_;
+    }
+
+    art::llvm::IntrinsicHelper* GetIntrinsicHelper() {
+      return intrinsic_helper_.get();
+    }
+
+    art::llvm::IRBuilder* GetIRBuilder() {
+      return ir_builder_.get();
+    }
+
+  private:
+    UniquePtr< ::llvm::LLVMContext> llvm_context_;
+    ::llvm::Module* llvm_module_; // Managed by context_.
+    UniquePtr<art::llvm::IntrinsicHelper> intrinsic_helper_;
+    UniquePtr<art::llvm::IRBuilder> ir_builder_;
+};
+
+struct CompilationUnit;
+struct BasicBlock;
+
+}  // namespace art
+
+extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
+                                                 const art::DexFile::CodeItem* code_item,
+                                                 uint32_t access_flags,
+                                                 art::InvokeType invoke_type,
+                                                 uint32_t class_dex_idx,
+                                                 uint32_t method_idx,
+                                                 jobject class_loader,
+                                                 const art::DexFile& dex_file);
+
+#endif  // ART_SRC_COMPILER_DEX_COMPILER_H_
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
new file mode 100644
index 0000000..c4684a7
--- /dev/null
+++ b/compiler/dex/growable_array.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_GROWABLE_LIST_H_
+#define ART_SRC_COMPILER_DEX_GROWABLE_LIST_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+#include "arena_allocator.h"
+
+namespace art {
+
+struct CompilationUnit;
+
+// Type of growable list for memory tuning.
+enum OatListKind {
+  kGrowableArrayMisc = 0,
+  kGrowableArrayBlockList,
+  kGrowableArraySSAtoDalvikMap,
+  kGrowableArrayDfsOrder,
+  kGrowableArrayDfsPostOrder,
+  kGrowableArrayDomPostOrderTraversal,
+  kGrowableArrayThrowLaunchPads,
+  kGrowableArraySuspendLaunchPads,
+  kGrowableArraySwitchTables,
+  kGrowableArrayFillArrayData,
+  kGrowableArraySuccessorBlocks,
+  kGrowableArrayPredecessors,
+  kGNumListKinds
+};
+
+template<typename T>
+class GrowableArray {
+  public:
+
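+    // Simple forward iterator; illustrative use:
+    //   GrowableArray<T>::Iterator iter(&array);
+    //   for (T elem = iter.Next(); elem != 0; elem = iter.Next()) { /* use elem */ }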
+    class Iterator {
+      public:
+        Iterator(GrowableArray* g_list)
+          : idx_(0),
+            g_list_(g_list) {};
+
+        // NOTE: returns 0/NULL when no next.
+        // TODO: redo to make usage consistent with other iterators.
+        T Next() {
+          if (idx_ >= g_list_->Size()) {
+            return 0;
+          } else {
+            return g_list_->Get(idx_++);
+          }
+        }
+
+        void Reset() {
+          idx_ = 0;
+        }
+
+        static void* operator new(size_t size, ArenaAllocator* arena) {
+          return arena->NewMem(sizeof(GrowableArray::Iterator), true, ArenaAllocator::kAllocGrowableArray);
+        };
+        static void operator delete(void* p) {};  // Nop.
+
+      private:
+        size_t idx_;
+        GrowableArray* const g_list_;
+    };
+
+    GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+      : arena_(arena),
+        num_allocated_(init_length),
+        num_used_(0),
+        kind_(kind) {
+      elem_list_ = static_cast<T*>(arena_->NewMem(sizeof(T) * init_length, true,
+                                                  ArenaAllocator::kAllocGrowableArray));
+    };
+
+
+    // Expand the list size to at least new length.
+    void Resize(size_t new_length) {
+      if (new_length <= num_allocated_) return;
+      // If it's a small list double the size, else grow 1.5x.
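+      // For example, 64 allocated slots grow to 128, while 256 grow to 384.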
+      size_t target_length =
+          (num_allocated_ < 128) ? num_allocated_ << 1 : num_allocated_ + (num_allocated_ >> 1);
+      if (new_length > target_length) {
+        target_length = new_length;
+      }
+      T* new_array = static_cast<T*>(arena_->NewMem(sizeof(T) * target_length, true,
+                                                    ArenaAllocator::kAllocGrowableArray));
+      memcpy(new_array, elem_list_, sizeof(T) * num_allocated_);
+      num_allocated_ = target_length;
+      elem_list_ = new_array;
+    };
+
+    // NOTE: does not return storage, just resets use count.
+    void Reset() {
+      num_used_ = 0;
+    }
+
+    // Insert an element to the end of a list, resizing if necessary.
+    void Insert(T elem) {
+      if (num_used_ == num_allocated_) {
+        Resize(num_used_ + 1);
+      }
+      elem_list_[num_used_++] = elem;
+    };
+
+    T Get(size_t index) const {
+      DCHECK_LT(index, num_used_);
+      return elem_list_[index];
+    };
+
+    // Overwrite existing element at position index.  List must be large enough.
+    void Put(size_t index, T elem) {
+      DCHECK_LT(index, num_used_);
+      elem_list_[index] = elem;
+    }
+
+    void Increment(size_t index) {
+      DCHECK_LT(index, num_used_);
+      elem_list_[index]++;
+    }
+
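+    // Removes the first occurrence of element, shifting later entries left; e.g.
+    // deleting 'b' from [a, b, c] leaves [a, c] with Size() == 2.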
+    void Delete(T element) {
+      bool found = false;
+      for (size_t i = 0; i < num_used_ - 1; i++) {
+        if (!found && elem_list_[i] == element) {
+          found = true;
+        }
+        if (found) {
+          elem_list_[i] = elem_list_[i+1];
+        }
+      }
+      // We should either have found the element, or it was the last (unscanned) element.
+      DCHECK(found || (element == elem_list_[num_used_ - 1]));
+      num_used_--;
+    };
+
+    size_t GetNumAllocated() const { return num_allocated_; }
+
+    size_t Size() const { return num_used_; }
+
+    T* GetRawStorage() const { return elem_list_; }
+
+    static void* operator new(size_t size, ArenaAllocator* arena) {
+      return arena->NewMem(sizeof(GrowableArray<T>), true, ArenaAllocator::kAllocGrowableArray);
+    };
+    static void operator delete(void* p) {};  // Nop.
+
+  private:
+    ArenaAllocator* const arena_;
+    size_t num_allocated_;
+    size_t num_used_;
+    OatListKind kind_;
+    T* elem_list_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_GROWABLE_LIST_H_
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
new file mode 100644
index 0000000..ec5ab5d
--- /dev/null
+++ b/compiler/dex/local_value_numbering.cc
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "local_value_numbering.h"
+
+namespace art {
+
+
+uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
+  uint16_t res = NO_VALUE;
+  uint16_t opcode = mir->dalvikInsn.opcode;
+  switch (opcode) {
+    case Instruction::NOP:
+    case Instruction::RETURN_VOID:
+    case Instruction::RETURN:
+    case Instruction::RETURN_OBJECT:
+    case Instruction::RETURN_WIDE:
+    case Instruction::MONITOR_ENTER:
+    case Instruction::MONITOR_EXIT:
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+    case Instruction::CHECK_CAST:
+    case Instruction::THROW:
+    case Instruction::FILL_ARRAY_DATA:
+    case Instruction::FILLED_NEW_ARRAY:
+    case Instruction::FILLED_NEW_ARRAY_RANGE:
+    case Instruction::PACKED_SWITCH:
+    case Instruction::SPARSE_SWITCH:
+    case Instruction::IF_EQ:
+    case Instruction::IF_NE:
+    case Instruction::IF_LT:
+    case Instruction::IF_GE:
+    case Instruction::IF_GT:
+    case Instruction::IF_LE:
+    case Instruction::IF_EQZ:
+    case Instruction::IF_NEZ:
+    case Instruction::IF_LTZ:
+    case Instruction::IF_GEZ:
+    case Instruction::IF_GTZ:
+    case Instruction::IF_LEZ:
+    case Instruction::INVOKE_STATIC_RANGE:
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+    case Instruction::INVOKE_SUPER:
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE:
+    case kMirOpFusedCmplFloat:
+    case kMirOpFusedCmpgFloat:
+    case kMirOpFusedCmplDouble:
+    case kMirOpFusedCmpgDouble:
+    case kMirOpFusedCmpLong:
+      // Nothing defined - take no action.
+      break;
+
+    case Instruction::MOVE_EXCEPTION:
+    case Instruction::MOVE_RESULT:
+    case Instruction::MOVE_RESULT_OBJECT:
+    case Instruction::INSTANCE_OF:
+    case Instruction::NEW_INSTANCE:
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO:
+    case Instruction::CONST_CLASS:
+    case Instruction::NEW_ARRAY: {
+        // 1 result, treat as unique each time, use result s_reg - will be unique.
+        uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+    case Instruction::MOVE_RESULT_WIDE: {
+        // 1 wide result, treat as unique each time, use result s_reg - will be unique.
+        uint16_t res = GetOperandValueWide(mir->ssa_rep->defs[0]);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case kMirOpPhi:
+      /*
+       * Because we'll only see phi nodes at the beginning of an extended basic block,
+       * we can ignore them.  Revisit if we shift to global value numbering.
+       */
+      break;
+
+    case Instruction::MOVE:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_OBJECT_16:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_OBJECT_FROM16:
+    case kMirOpCopy: {
+        // Just copy value number of source to value number of result.
+        uint16_t res = GetOperandValue(mir->ssa_rep->uses[0]);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::MOVE_WIDE:
+    case Instruction::MOVE_WIDE_16:
+    case Instruction::MOVE_WIDE_FROM16: {
+        // Just copy value number of source to value number of result.
+        uint16_t res = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16: {
+        uint16_t res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+                                   High16Bits(mir->dalvikInsn.vB), 0);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CONST_HIGH16: {
+        uint16_t res = LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CONST_WIDE_16:
+    case Instruction::CONST_WIDE_32: {
+        uint16_t low_res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+                                       High16Bits(mir->dalvikInsn.vB), 1);
+        uint16_t high_res;
+        if (mir->dalvikInsn.vB & 0x80000000) {
+          high_res = LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
+        } else {
+          high_res = LookupValue(Instruction::CONST, 0, 0, 2);
+        }
+        uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CONST_WIDE: {
+        uint32_t low_word = Low32Bits(mir->dalvikInsn.vB_wide);
+        uint32_t high_word = High32Bits(mir->dalvikInsn.vB_wide);
+        uint16_t low_res = LookupValue(Instruction::CONST, Low16Bits(low_word),
+                                       High16Bits(low_word), 1);
+        uint16_t high_res = LookupValue(Instruction::CONST, Low16Bits(high_word),
+                                       High16Bits(high_word), 2);
+        uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CONST_WIDE_HIGH16: {
+        uint16_t low_res = LookupValue(Instruction::CONST, 0, 0, 1);
+        uint16_t high_res = LookupValue(Instruction::CONST, 0, Low16Bits(mir->dalvikInsn.vB), 2);
+        uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::ARRAY_LENGTH:
+    case Instruction::NEG_INT:
+    case Instruction::NOT_INT:
+    case Instruction::NEG_FLOAT:
+    case Instruction::INT_TO_BYTE:
+    case Instruction::INT_TO_SHORT:
+    case Instruction::INT_TO_CHAR:
+    case Instruction::INT_TO_FLOAT:
+    case Instruction::FLOAT_TO_INT: {
+        // res = op + 1 operand
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::LONG_TO_FLOAT:
+    case Instruction::LONG_TO_INT:
+    case Instruction::DOUBLE_TO_FLOAT:
+    case Instruction::DOUBLE_TO_INT: {
+        // res = op + 1 wide operand
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+
+    case Instruction::DOUBLE_TO_LONG:
+    case Instruction::LONG_TO_DOUBLE:
+    case Instruction::NEG_LONG:
+    case Instruction::NOT_LONG:
+    case Instruction::NEG_DOUBLE: {
+        // wide res = op + 1 wide operand
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::FLOAT_TO_DOUBLE:
+    case Instruction::FLOAT_TO_LONG:
+    case Instruction::INT_TO_DOUBLE:
+    case Instruction::INT_TO_LONG: {
+        // wide res = op + 1 operand
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CMPL_DOUBLE:
+    case Instruction::CMPG_DOUBLE:
+    case Instruction::CMP_LONG: {
+        // res = op + 2 wide operands
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
+        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::CMPG_FLOAT:
+    case Instruction::CMPL_FLOAT:
+    case Instruction::ADD_INT:
+    case Instruction::ADD_INT_2ADDR:
+    case Instruction::MUL_INT:
+    case Instruction::MUL_INT_2ADDR:
+    case Instruction::AND_INT:
+    case Instruction::AND_INT_2ADDR:
+    case Instruction::OR_INT:
+    case Instruction::OR_INT_2ADDR:
+    case Instruction::XOR_INT:
+    case Instruction::XOR_INT_2ADDR:
+    case Instruction::SUB_INT:
+    case Instruction::SUB_INT_2ADDR:
+    case Instruction::DIV_INT:
+    case Instruction::DIV_INT_2ADDR:
+    case Instruction::REM_INT:
+    case Instruction::REM_INT_2ADDR:
+    case Instruction::SHL_INT:
+    case Instruction::SHL_INT_2ADDR:
+    case Instruction::SHR_INT:
+    case Instruction::SHR_INT_2ADDR:
+    case Instruction::USHR_INT:
+    case Instruction::USHR_INT_2ADDR: {
+        // res = op + 2 operands
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
+        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::ADD_LONG:
+    case Instruction::SUB_LONG:
+    case Instruction::MUL_LONG:
+    case Instruction::DIV_LONG:
+    case Instruction::REM_LONG:
+    case Instruction::AND_LONG:
+    case Instruction::OR_LONG:
+    case Instruction::XOR_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+    case Instruction::SUB_LONG_2ADDR:
+    case Instruction::MUL_LONG_2ADDR:
+    case Instruction::DIV_LONG_2ADDR:
+    case Instruction::REM_LONG_2ADDR:
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::OR_LONG_2ADDR:
+    case Instruction::XOR_LONG_2ADDR:
+    case Instruction::ADD_DOUBLE:
+    case Instruction::SUB_DOUBLE:
+    case Instruction::MUL_DOUBLE:
+    case Instruction::DIV_DOUBLE:
+    case Instruction::REM_DOUBLE:
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE_2ADDR: {
+        // wide res = op + 2 wide operands
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
+        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::SHL_LONG:
+    case Instruction::SHR_LONG:
+    case Instruction::USHR_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+    case Instruction::SHR_LONG_2ADDR:
+    case Instruction::USHR_LONG_2ADDR: {
+        // wide res = op + 1 wide operand + 1 operand
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
+        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::ADD_FLOAT:
+    case Instruction::SUB_FLOAT:
+    case Instruction::MUL_FLOAT:
+    case Instruction::DIV_FLOAT:
+    case Instruction::REM_FLOAT:
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT_2ADDR: {
+        // res = op + 2 operands
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
+        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::RSUB_INT:
+    case Instruction::ADD_INT_LIT16:
+    case Instruction::MUL_INT_LIT16:
+    case Instruction::DIV_INT_LIT16:
+    case Instruction::REM_INT_LIT16:
+    case Instruction::AND_INT_LIT16:
+    case Instruction::OR_INT_LIT16:
+    case Instruction::XOR_INT_LIT16:
+    case Instruction::ADD_INT_LIT8:
+    case Instruction::RSUB_INT_LIT8:
+    case Instruction::MUL_INT_LIT8:
+    case Instruction::DIV_INT_LIT8:
+    case Instruction::REM_INT_LIT8:
+    case Instruction::AND_INT_LIT8:
+    case Instruction::OR_INT_LIT8:
+    case Instruction::XOR_INT_LIT8:
+    case Instruction::SHL_INT_LIT8:
+    case Instruction::SHR_INT_LIT8:
+    case Instruction::USHR_INT_LIT8: {
+        // Same as res = op + 2 operands, except use vB as operand 2
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vB, 0, 0);
+        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        SetOperandValue(mir->ssa_rep->defs[0], res);
+      }
+      break;
+
+    case Instruction::AGET_WIDE:
+    case Instruction::AGET:
+    case Instruction::AGET_OBJECT:
+    case Instruction::AGET_BOOLEAN:
+    case Instruction::AGET_BYTE:
+    case Instruction::AGET_CHAR:
+    case Instruction::AGET_SHORT: {
+        uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
+        if (null_checked_.find(array) != null_checked_.end()) {
+          if (cu_->verbose) {
+            LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+          }
+          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+        } else {
+          null_checked_.insert(array);
+        }
+        uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
+        if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
+          if (cu_->verbose) {
+            LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+          }
+          mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+        }
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+        // Use side effect to note range check completed.
+        (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+        // Establish value number for loaded register. Note use of memory version.
+        uint16_t memory_version = GetMemoryVersion(array, NO_VALUE);
+        uint16_t res = LookupValue(ARRAY_REF, array, index, memory_version);
+        if (opcode == Instruction::AGET_WIDE) {
+          SetOperandValueWide(mir->ssa_rep->defs[0], res);
+        } else {
+          SetOperandValue(mir->ssa_rep->defs[0], res);
+        }
+      }
+      break;
+
+    case Instruction::APUT_WIDE:
+    case Instruction::APUT:
+    case Instruction::APUT_OBJECT:
+    case Instruction::APUT_SHORT:
+    case Instruction::APUT_CHAR:
+    case Instruction::APUT_BYTE:
+    case Instruction::APUT_BOOLEAN: {
+        int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
+        int index_idx = array_idx + 1;
+        uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
+        if (null_checked_.find(array) != null_checked_.end()) {
+          if (cu_->verbose) {
+            LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+          }
+          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+        } else {
+          null_checked_.insert(array);
+        }
+        uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
+        if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
+          if (cu_->verbose) {
+            LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+          }
+          mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+        }
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+        // Use side effect to note range check completed.
+        (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+        // Rev the memory version
+        AdvanceMemoryVersion(array, NO_VALUE);
+      }
+      break;
+
+    case Instruction::IGET_OBJECT:
+    case Instruction::IGET_WIDE:
+    case Instruction::IGET:
+    case Instruction::IGET_CHAR:
+    case Instruction::IGET_SHORT:
+    case Instruction::IGET_BOOLEAN:
+    case Instruction::IGET_BYTE: {
+        uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
+        if (null_checked_.find(base) != null_checked_.end()) {
+          if (cu_->verbose) {
+            LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+          }
+          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+        } else {
+          null_checked_.insert(base);
+        }
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+        uint16_t field_ref = mir->dalvikInsn.vC;
+        uint16_t memory_version = GetMemoryVersion(base, field_ref);
+        if (opcode == Instruction::IGET_WIDE) {
+          uint16_t res = LookupValue(Instruction::IGET_WIDE, base, field_ref, memory_version);
+          SetOperandValueWide(mir->ssa_rep->defs[0], res);
+        } else {
+          uint16_t res = LookupValue(Instruction::IGET, base, field_ref, memory_version);
+          SetOperandValue(mir->ssa_rep->defs[0], res);
+        }
+      }
+      break;
+
+    case Instruction::IPUT_WIDE:
+    case Instruction::IPUT_OBJECT:
+    case Instruction::IPUT:
+    case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BYTE:
+    case Instruction::IPUT_CHAR:
+    case Instruction::IPUT_SHORT: {
+        int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
+        uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
+        if (null_checked_.find(base) != null_checked_.end()) {
+          if (cu_->verbose) {
+            LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+          }
+          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+        } else {
+          null_checked_.insert(base);
+        }
+        mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+        uint16_t field_ref = mir->dalvikInsn.vC;
+        AdvanceMemoryVersion(base, field_ref);
+      }
+      break;
+
+    case Instruction::SGET_OBJECT:
+    case Instruction::SGET:
+    case Instruction::SGET_BOOLEAN:
+    case Instruction::SGET_BYTE:
+    case Instruction::SGET_CHAR:
+    case Instruction::SGET_SHORT:
+    case Instruction::SGET_WIDE: {
+        uint16_t field_ref = mir->dalvikInsn.vB;
+        uint16_t memory_version = GetMemoryVersion(NO_VALUE, field_ref);
+        if (opcode == Instruction::SGET_WIDE) {
+          uint16_t res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_ref, memory_version);
+          SetOperandValueWide(mir->ssa_rep->defs[0], res);
+        } else {
+          uint16_t res = LookupValue(Instruction::SGET, NO_VALUE, field_ref, memory_version);
+          SetOperandValue(mir->ssa_rep->defs[0], res);
+        }
+      }
+      break;
+
+    case Instruction::SPUT_OBJECT:
+    case Instruction::SPUT:
+    case Instruction::SPUT_BOOLEAN:
+    case Instruction::SPUT_BYTE:
+    case Instruction::SPUT_CHAR:
+    case Instruction::SPUT_SHORT:
+    case Instruction::SPUT_WIDE: {
+        uint16_t field_ref = mir->dalvikInsn.vB;
+        AdvanceMemoryVersion(NO_VALUE, field_ref);
+      }
+      break;
+
+  }
+  return res;
+}
+
+}  // namespace art
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
new file mode 100644
index 0000000..beb4cea
--- /dev/null
+++ b/compiler/dex/local_value_numbering.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
+#define ART_SRC_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
+
+#include "compiler_internals.h"
+
+#define NO_VALUE 0xffff
+#define ARRAY_REF 0xfffe
+
+namespace art {
+
+// Key is s_reg, value is value name.
+typedef SafeMap<uint16_t, uint16_t> SregValueMap;
+// Key is concatenation of quad, value is value name.
+typedef SafeMap<uint64_t, uint16_t> ValueMap;
+// Key represents a memory address, value is generation.
+typedef SafeMap<uint32_t, uint16_t> MemoryVersionMap;
+
+class LocalValueNumbering {
+ public:
+  LocalValueNumbering(CompilationUnit* cu) : cu_(cu) {};
+
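+  // Packs an (op, operand1, operand2, modifier) quad into one 64-bit map key.
+  // For illustration, BuildKey(0x0012, 0x0003, 0x0004, 0x0000) == 0x0012000300040000.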
+  static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
+    return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
+            static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
+  };
+
+  uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
+    uint16_t res;
+    uint64_t key = BuildKey(op, operand1, operand2, modifier);
+    ValueMap::iterator it = value_map_.find(key);
+    if (it != value_map_.end()) {
+      res = it->second;
+    } else {
+      res = value_map_.size() + 1;
+      value_map_.Put(key, res);
+    }
+    return res;
+  };
+
+  bool ValueExists(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
+    uint64_t key = BuildKey(op, operand1, operand2, modifier);
+    ValueMap::const_iterator it = value_map_.find(key);
+    return (it != value_map_.end());
+  };
+
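+  // Memory versions make loads sensitive to intervening stores: a load's value name
+  // incorporates the current version for (base, field), and AdvanceMemoryVersion bumps
+  // that version on a store so subsequent loads get fresh value names.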
+  uint16_t GetMemoryVersion(uint16_t base, uint16_t field) {
+    uint32_t key = (base << 16) | field;
+    uint16_t res;
+    MemoryVersionMap::iterator it = memory_version_map_.find(key);
+    if (it == memory_version_map_.end()) {
+      res = 0;
+      memory_version_map_.Put(key, res);
+    } else {
+      res = it->second;
+    }
+    return res;
+  };
+
+  void AdvanceMemoryVersion(uint16_t base, uint16_t field) {
+    uint32_t key = (base << 16) | field;
+    MemoryVersionMap::iterator it = memory_version_map_.find(key);
+    if (it == memory_version_map_.end()) {
+      memory_version_map_.Put(key, 0);
+    } else {
+      it->second++;
+    }
+  };
+
+  void SetOperandValue(uint16_t s_reg, uint16_t value) {
+    SregValueMap::iterator it = sreg_value_map_.find(s_reg);
+    if (it != sreg_value_map_.end()) {
+      DCHECK_EQ(it->second, value);
+    } else {
+      sreg_value_map_.Put(s_reg, value);
+    }
+  };
+
+  uint16_t GetOperandValue(int s_reg) {
+    uint16_t res = NO_VALUE;
+    SregValueMap::iterator it = sreg_value_map_.find(s_reg);
+    if (it != sreg_value_map_.end()) {
+      res = it->second;
+    } else {
+      // First use
+      res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+      sreg_value_map_.Put(s_reg, res);
+    }
+    return res;
+  };
+
+  void SetOperandValueWide(uint16_t s_reg, uint16_t value) {
+    SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
+    if (it != sreg_wide_value_map_.end()) {
+      DCHECK_EQ(it->second, value);
+    } else {
+      sreg_wide_value_map_.Put(s_reg, value);
+    }
+  };
+
+  uint16_t GetOperandValueWide(int s_reg) {
+    uint16_t res = NO_VALUE;
+    SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
+    if (it != sreg_wide_value_map_.end()) {
+      res = it->second;
+    } else {
+      // First use
+      res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+      sreg_wide_value_map_.Put(s_reg, res);
+    }
+    return res;
+  };
+
+  uint16_t GetValueNumber(MIR* mir);
+
+ private:
+  CompilationUnit* const cu_;
+  SregValueMap sreg_value_map_;
+  SregValueMap sreg_wide_value_map_;
+  ValueMap value_map_;
+  MemoryVersionMap memory_version_map_;
+  std::set<uint16_t> null_checked_;
+
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
new file mode 100644
index 0000000..6c152d2
--- /dev/null
+++ b/compiler/dex/mir_dataflow.cc
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "local_value_numbering.h"
+#include "dataflow_iterator-inl.h"
+
+namespace art {
+
+/*
+ * Main table containing data flow attributes for each bytecode. The
+ * first kNumPackedOpcodes entries are for Dalvik bytecode
+ * instructions, where extended opcode at the MIR level are appended
+ * afterwards.
+ *
+ * TODO - many optimization flags are incomplete - they will only limit the
+ * scope of optimizations but will not cause mis-optimizations.
+ */
+const int MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
+  // 00 NOP
+  DF_NOP,
+
+  // 01 MOVE vA, vB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 02 MOVE_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 03 MOVE_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 04 MOVE_WIDE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 06 MOVE_WIDE_16 vAAAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 07 MOVE_OBJECT vA, vB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 0A MOVE_RESULT vAA
+  DF_DA,
+
+  // 0B MOVE_RESULT_WIDE vAA
+  DF_DA | DF_A_WIDE,
+
+  // 0C MOVE_RESULT_OBJECT vAA
+  DF_DA | DF_REF_A,
+
+  // 0D MOVE_EXCEPTION vAA
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 0E RETURN_VOID
+  DF_NOP,
+
+  // 0F RETURN vAA
+  DF_UA,
+
+  // 10 RETURN_WIDE vAA
+  DF_UA | DF_A_WIDE,
+
+  // 11 RETURN_OBJECT vAA
+  DF_UA | DF_REF_A,
+
+  // 12 CONST_4 vA, #+B
+  DF_DA | DF_SETS_CONST,
+
+  // 13 CONST_16 vAA, #+BBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 14 CONST vAA, #+BBBBBBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 15 CONST_HIGH16 VAA, #+BBBB0000
+  DF_DA | DF_SETS_CONST,
+
+  // 16 CONST_WIDE_16 vAA, #+BBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 1A CONST_STRING vAA, string@BBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1C CONST_CLASS vAA, type@BBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1D MONITOR_ENTER vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1E MONITOR_EXIT vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1F CHK_CAST vAA, type@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 20 INSTANCE_OF vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
+
+  // 21 ARRAY_LENGTH vA, vB
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+
+  // 22 NEW_INSTANCE vAA, type@BBBB
+  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
+
+  // 23 NEW_ARRAY vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
+
+  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+
+  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+
+  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 27 THROW vAA
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 28 GOTO
+  DF_NOP,
+
+  // 29 GOTO_16
+  DF_NOP,
+
+  // 2A GOTO_32
+  DF_NOP,
+
+  // 2B PACKED_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2D CMPL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2E CMPG_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2F CMPL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 30 CMPG_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 31 CMP_LONG vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 32 IF_EQ vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 33 IF_NE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 34 IF_LT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 35 IF_GE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 36 IF_GT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 37 IF_LE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 38 IF_EQZ vAA, +BBBB
+  DF_UA,
+
+  // 39 IF_NEZ vAA, +BBBB
+  DF_UA,
+
+  // 3A IF_LTZ vAA, +BBBB
+  DF_UA,
+
+  // 3B IF_GEZ vAA, +BBBB
+  DF_UA,
+
+  // 3C IF_GTZ vAA, +BBBB
+  DF_UA,
+
+  // 3D IF_LEZ vAA, +BBBB
+  DF_UA,
+
+  // 3E UNUSED_3E
+  DF_NOP,
+
+  // 3F UNUSED_3F
+  DF_NOP,
+
+  // 40 UNUSED_40
+  DF_NOP,
+
+  // 41 UNUSED_41
+  DF_NOP,
+
+  // 42 UNUSED_42
+  DF_NOP,
+
+  // 43 UNUSED_43
+  DF_NOP,
+
+  // 44 AGET vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 45 AGET_WIDE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 46 AGET_OBJECT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 47 AGET_BOOLEAN vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 48 AGET_BYTE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 49 AGET_CHAR vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4A AGET_SHORT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4B APUT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4C APUT_WIDE vAA, vBB, vCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
+
+  // 4D APUT_OBJECT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 4E APUT_BOOLEAN vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4F APUT_BYTE vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 50 APUT_CHAR vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 51 APUT_SHORT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 52 IGET vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 53 IGET_WIDE vA, vB, field@CCCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 54 IGET_OBJECT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // 55 IGET_BOOLEAN vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 56 IGET_BYTE vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 57 IGET_CHAR vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 58 IGET_SHORT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 59 IPUT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5A IPUT_WIDE vA, vB, field@CCCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // 5B IPUT_OBJECT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5D IPUT_BYTE vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5E IPUT_CHAR vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5F IPUT_SHORT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 60 SGET vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 61 SGET_WIDE vAA, field@BBBB
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // 62 SGET_OBJECT vAA, field@BBBB
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // 63 SGET_BOOLEAN vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 64 SGET_BYTE vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 65 SGET_CHAR vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 66 SGET_SHORT vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 67 SPUT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 68 SPUT_WIDE vAA, field@BBBB
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // 69 SPUT_OBJECT vAA, field@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 6A SPUT_BOOLEAN vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6B SPUT_BYTE vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6C SPUT_CHAR vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6D SPUT_SHORT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_UMS,
+
+  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 73 UNUSED_73
+  DF_NOP,
+
+  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_UMS,
+
+  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 79 UNUSED_79
+  DF_NOP,
+
+  // 7A UNUSED_7A
+  DF_NOP,
+
+  // 7B NEG_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7C NOT_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7D NEG_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7E NOT_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7F NEG_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 80 NEG_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 81 INT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 82 INT_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 83 INT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 84 LONG_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 85 LONG_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 86 LONG_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 87 FLOAT_TO_INT vA, vB
+  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 88 FLOAT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 89 FLOAT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 8A DOUBLE_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8B DOUBLE_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8C DOUBLE_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 8D INT_TO_BYTE vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8E INT_TO_CHAR vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8F INT_TO_SHORT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 90 ADD_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 91 SUB_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 92 MUL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 93 DIV_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 94 REM_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 95 AND_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 96 OR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 97 XOR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 98 SHL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 99 SHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9A USHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9B ADD_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9C SUB_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9D MUL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9E DIV_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9F REM_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A0 AND_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A1 OR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A2 XOR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A3 SHL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A4 SHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A5 USHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A6 ADD_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A7 SUB_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A8 MUL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A9 DIV_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AA REM_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AB ADD_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AC SUB_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AD MUL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AE DIV_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AF REM_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // B0 ADD_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B1 SUB_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B2 MUL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B3 DIV_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B4 REM_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B5 AND_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B6 OR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B7 XOR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B8 SHL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B9 SHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BA USHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BB ADD_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BC SUB_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BD MUL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BE DIV_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BF REM_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C0 AND_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C1 OR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C2 XOR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C3 SHL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C4 SHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C5 USHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C6 ADD_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C7 SUB_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C8 MUL_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C9 DIV_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CA REM_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CB ADD_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CC SUB_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CD MUL_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CE DIV_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CF REM_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D1 RSUB_INT vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D4 REM_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D5 AND_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D6 OR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DA MUL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DB DIV_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DC REM_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DD AND_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DE OR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DF XOR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E3 IGET_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E4 IPUT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // E5 SGET_VOLATILE
+  DF_DA | DF_UMS,
+
+  // E6 SPUT_VOLATILE
+  DF_UA | DF_UMS,
+
+  // E7 IGET_OBJECT_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // E8 IGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E9 IPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // EA SGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // EB SPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // EC BREAKPOINT
+  DF_NOP,
+
+  // ED THROW_VERIFICATION_ERROR
+  DF_NOP | DF_UMS,
+
+  // EE EXECUTE_INLINE
+  DF_FORMAT_35C,
+
+  // EF EXECUTE_INLINE_RANGE
+  DF_FORMAT_3RC,
+
+  // F0 INVOKE_OBJECT_INIT_RANGE
+  DF_NOP | DF_NULL_CHK_0,
+
+  // F1 RETURN_VOID_BARRIER
+  DF_NOP,
+
+  // F2 IGET_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F3 IGET_WIDE_QUICK
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
+
+  // F4 IGET_OBJECT_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F5 IPUT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F6 IPUT_WIDE_QUICK
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
+
+  // F7 IPUT_OBJECT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F8 INVOKE_VIRTUAL_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // F9 INVOKE_VIRTUAL_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FA INVOKE_SUPER_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FB INVOKE_SUPER_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FC IPUT_OBJECT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // FD SGET_OBJECT_VOLATILE
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // FE SPUT_OBJECT_VOLATILE
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // FF UNUSED_FF
+  DF_NOP,
+
+  // Beginning of extended MIR opcodes
+  // 100 MIR_PHI
+  DF_DA | DF_NULL_TRANSFER_N,
+
+  // 101 MIR_COPY
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 102 MIR_FUSED_CMPL_FLOAT
+  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 103 MIR_FUSED_CMPG_FLOAT
+  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 104 MIR_FUSED_CMPL_DOUBLE
+  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 105 MIR_FUSED_CMPG_DOUBLE
+  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 106 MIR_FUSED_CMP_LONG
+  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 107 MIR_NOP
+  DF_NOP,
+
+  // 108 MIR_NULL_CHECK
+  0,
+
+  // 109 MIR_RANGE_CHECK
+  0,
+
+  // 10A MIR_DIV_ZERO_CHECK
+  0,
+
+  // 10B MIR_CHECK
+  0,
+
+  // 10C MIR_CHECKPART2
+  0,
+
+  // 10D MIR_SELECT
+  DF_DA | DF_UB,
+};
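+
+/*
+ * Illustrative note: each entry above is indexed by opcode and consumed by the
+ * dataflow passes below through oat_data_flow_attributes_.  For example, the
+ * AGET_WIDE entry (DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 |
+ * DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C) records that the instruction defines
+ * a wide (register-pair) vAA, uses vBB and vCC, needs a null check keyed to
+ * its first use and a range check keyed to its second, and that vBB is a
+ * reference while vCC holds a core value.
+ */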
+
+/* Return the base virtual register for an SSA name */
+int MIRGraph::SRegToVReg(int ssa_reg) const {
+  return ssa_base_vregs_->Get(ssa_reg);
+}
+
+/* Any register that is used before being defined is considered live-in */
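+/*
+ * For example, in a block that reads v3 and only later writes v3, the first
+ * visit records v3 in use_v and live_in_v; the subsequent HandleDef() adds it
+ * to def_v, so later uses of v3 in the same block are not marked live-in.
+ */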
+void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
+                            ArenaBitVector* live_in_v, int dalvik_reg_id)
+{
+  use_v->SetBit(dalvik_reg_id);
+  if (!def_v->IsBitSet(dalvik_reg_id)) {
+    live_in_v->SetBit(dalvik_reg_id);
+  }
+}
+
+/* Mark a reg as being defined */
+void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id)
+{
+  def_v->SetBit(dalvik_reg_id);
+}
+
+/*
+ * Find out live-in variables for natural loops. Variables that are live-in in
+ * the main loop body are considered to be defined in the entry block.
+ */
+bool MIRGraph::FindLocalLiveIn(BasicBlock* bb)
+{
+  MIR* mir;
+  ArenaBitVector *use_v, *def_v, *live_in_v;
+
+  if (bb->data_flow_info == NULL) return false;
+
+  use_v = bb->data_flow_info->use_v =
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapUse);
+  def_v = bb->data_flow_info->def_v =
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapDef);
+  live_in_v = bb->data_flow_info->live_in_v =
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapLiveIn);
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+    DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+    if (df_attributes & DF_HAS_USES) {
+      if (df_attributes & DF_UA) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA);
+        if (df_attributes & DF_A_WIDE) {
+          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA+1);
+        }
+      }
+      if (df_attributes & DF_UB) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB);
+        if (df_attributes & DF_B_WIDE) {
+          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB+1);
+        }
+      }
+      if (df_attributes & DF_UC) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC);
+        if (df_attributes & DF_C_WIDE) {
+          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+1);
+        }
+      }
+    }
+    if (df_attributes & DF_FORMAT_35C) {
+      for (unsigned int i = 0; i < d_insn->vA; i++) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->arg[i]);
+      }
+    }
+    if (df_attributes & DF_FORMAT_3RC) {
+      for (unsigned int i = 0; i < d_insn->vA; i++) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+i);
+      }
+    }
+    if (df_attributes & DF_HAS_DEFS) {
+      HandleDef(def_v, d_insn->vA);
+      if (df_attributes & DF_A_WIDE) {
+        HandleDef(def_v, d_insn->vA+1);
+      }
+    }
+  }
+  return true;
+}
+
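+/*
+ * Assign a new SSA name for v_reg.  Illustrative example: if Dalvik register
+ * v1 has already been defined twice, AddNewSReg(1) bumps ssa_last_defs_[1]
+ * to 3, records base vreg 1 with subscript 3 in the SSA name tables, and
+ * returns the next free SSA register number.
+ */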
+int MIRGraph::AddNewSReg(int v_reg)
+{
+  // Compiler temps always have a subscript of 0
+  int subscript = (v_reg < 0) ? 0 : ++ssa_last_defs_[v_reg];
+  int ssa_reg = GetNumSSARegs();
+  SetNumSSARegs(ssa_reg + 1);
+  ssa_base_vregs_->Insert(v_reg);
+  ssa_subscripts_->Insert(subscript);
+  DCHECK_EQ(ssa_base_vregs_->Size(), ssa_subscripts_->Size());
+  return ssa_reg;
+}
+
+/* Find out the latest SSA register for a given Dalvik register */
+void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index)
+{
+  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers));
+  uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
+}
+
+/* Setup a new SSA register for a given Dalvik register */
+void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index)
+{
+  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers));
+  int ssa_reg = AddNewSReg(dalvik_reg);
+  vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
+  defs[reg_index] = ssa_reg;
+}
+
+/* Look up new SSA names for format_35c instructions */
+void MIRGraph::DataFlowSSAFormat35C(MIR* mir)
+{
+  DecodedInstruction *d_insn = &mir->dalvikInsn;
+  int num_uses = d_insn->vA;
+  int i;
+
+  mir->ssa_rep->num_uses = num_uses;
+  mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, true,
+                                                        ArenaAllocator::kAllocDFInfo));
+  // NOTE: will be filled in during type & size inference pass
+  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
+                                                           ArenaAllocator::kAllocDFInfo));
+
+  for (i = 0; i < num_uses; i++) {
+    HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
+  }
+}
+
+/* Look up new SSA names for format_3rc instructions */
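+/*
+ * For a range form such as invoke-virtual/range {v4 .. v7}, vA holds the
+ * argument count (4) and vC the first register, so the loop below records SSA
+ * uses for v4, v5, v6 and v7 in order.
+ */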
+void MIRGraph::DataFlowSSAFormat3RC(MIR* mir)
+{
+  DecodedInstruction *d_insn = &mir->dalvikInsn;
+  int num_uses = d_insn->vA;
+  int i;
+
+  mir->ssa_rep->num_uses = num_uses;
+  mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, true,
+                                                        ArenaAllocator::kAllocDFInfo));
+  // NOTE: will be filled in during type & size inference pass
+  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
+                                                           ArenaAllocator::kAllocDFInfo));
+
+  for (i = 0; i < num_uses; i++) {
+    HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
+  }
+}
+
+/* Entry function to convert a block into SSA representation */
+bool MIRGraph::DoSSAConversion(BasicBlock* bb)
+{
+  MIR* mir;
+
+  if (bb->data_flow_info == NULL) return false;
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    mir->ssa_rep =
+        static_cast<struct SSARepresentation *>(arena_->NewMem(sizeof(SSARepresentation), true,
+                                                               ArenaAllocator::kAllocDFInfo));
+
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+
+    // If not a pseudo-op, note non-leaf or can throw
+    if (static_cast<int>(mir->dalvikInsn.opcode) <
+        static_cast<int>(kNumPackedOpcodes)) {
+      int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
+
+      if (flags & Instruction::kInvoke) {
+        attributes_ &= ~METHOD_IS_LEAF;
+      }
+    }
+
+    int num_uses = 0;
+
+    if (df_attributes & DF_FORMAT_35C) {
+      DataFlowSSAFormat35C(mir);
+      continue;
+    }
+
+    if (df_attributes & DF_FORMAT_3RC) {
+      DataFlowSSAFormat3RC(mir);
+      continue;
+    }
+
+    if (df_attributes & DF_HAS_USES) {
+      if (df_attributes & DF_UA) {
+        num_uses++;
+        if (df_attributes & DF_A_WIDE) {
+          num_uses++;
+        }
+      }
+      if (df_attributes & DF_UB) {
+        num_uses++;
+        if (df_attributes & DF_B_WIDE) {
+          num_uses++;
+        }
+      }
+      if (df_attributes & DF_UC) {
+        num_uses++;
+        if (df_attributes & DF_C_WIDE) {
+          num_uses++;
+        }
+      }
+    }
+
+    if (num_uses) {
+      mir->ssa_rep->num_uses = num_uses;
+      mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
+                                                            ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, false,
+                                                               ArenaAllocator::kAllocDFInfo));
+    }
+
+    int num_defs = 0;
+
+    if (df_attributes & DF_HAS_DEFS) {
+      num_defs++;
+      if (df_attributes & DF_A_WIDE) {
+        num_defs++;
+      }
+    }
+
+    if (num_defs) {
+      mir->ssa_rep->num_defs = num_defs;
+      mir->ssa_rep->defs = static_cast<int*>(arena_->NewMem(sizeof(int) * num_defs, false,
+                                                            ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->fp_def = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_defs, false,
+                                                               ArenaAllocator::kAllocDFInfo));
+    }
+
+    DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+    if (df_attributes & DF_HAS_USES) {
+      num_uses = 0;
+      if (df_attributes & DF_UA) {
+        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+        HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
+        if (df_attributes & DF_A_WIDE) {
+          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+          HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
+        }
+      }
+      if (df_attributes & DF_UB) {
+        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+        HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
+        if (df_attributes & DF_B_WIDE) {
+          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+          HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
+        }
+      }
+      if (df_attributes & DF_UC) {
+        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+        HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
+        if (df_attributes & DF_C_WIDE) {
+          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+          HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
+        }
+      }
+    }
+    if (df_attributes & DF_HAS_DEFS) {
+      mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
+      HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
+      if (df_attributes & DF_A_WIDE) {
+        mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
+        HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
+      }
+    }
+  }
+
+  /*
+   * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
+   * input to PHI nodes can be derived from the snapshot of all
+   * predecessor blocks.
+   */
+  bb->data_flow_info->vreg_to_ssa_map =
+      static_cast<int*>(arena_->NewMem(sizeof(int) * cu_->num_dalvik_registers, false,
+                                       ArenaAllocator::kAllocDFInfo));
+
+  memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
+         sizeof(int) * cu_->num_dalvik_registers);
+  return true;
+}
+
+/* Setup the basic data structures for SSA conversion */
+void MIRGraph::CompilerInitializeSSAConversion()
+{
+  size_t num_dalvik_reg = cu_->num_dalvik_registers;
+
+  ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
+                                            kGrowableArraySSAtoDalvikMap);
+  ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
+                                            kGrowableArraySSAtoDalvikMap);
+  /*
+   * Initial number of SSA registers is equal to the number of Dalvik
+   * registers.
+   */
+  SetNumSSARegs(num_dalvik_reg);
+
+  /*
+   * Initialize the SSA2Dalvik map list. For the first num_dalvik_reg elements,
+   * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
+   * into "(0 << 16) | i"
+   */
+  for (unsigned int i = 0; i < num_dalvik_reg; i++) {
+    ssa_base_vregs_->Insert(i);
+    ssa_subscripts_->Insert(0);
+  }
+
+  /*
+   * Initialize the DalvikToSSAMap map. There is one entry for each
+   * Dalvik register, and the SSA names for those are the same.
+   */
+  vreg_to_ssa_map_ =
+      static_cast<int*>(arena_->NewMem(sizeof(int) * num_dalvik_reg, false,
+                                       ArenaAllocator::kAllocDFInfo));
+  /* Keep track of the highest def for each Dalvik reg */
+  ssa_last_defs_ =
+      static_cast<int*>(arena_->NewMem(sizeof(int) * num_dalvik_reg, false,
+                                       ArenaAllocator::kAllocDFInfo));
+
+  for (unsigned int i = 0; i < num_dalvik_reg; i++) {
+    vreg_to_ssa_map_[i] = i;
+    ssa_last_defs_[i] = 0;
+  }
+
+  /* Add ssa reg for Method* */
+  method_sreg_ = AddNewSReg(SSA_METHOD_BASEREG);
+
+  /*
+   * Allocate the BasicBlockDataFlow structure for the entry and code blocks
+   */
+  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
+
+  while (true) {
+    BasicBlock* bb = iterator.Next();
+    if (bb == NULL) break;
+    if (bb->hidden) continue;
+    if (bb->block_type == kDalvikByteCode ||
+      bb->block_type == kEntryBlock ||
+      bb->block_type == kExitBlock) {
+      bb->data_flow_info =
+          static_cast<BasicBlockDataFlow*>(arena_->NewMem(sizeof(BasicBlockDataFlow), true,
+                                                          ArenaAllocator::kAllocDFInfo));
+    }
+  }
+}
+
+/*
+ * This function makes a best guess at whether the invoke will
+ * end up using Method*.  It isn't critical to get it exactly right;
+ * attempting to do so would involve more complexity than it's
+ * worth.
+ */
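+// Heuristic note: per the test at the end of this function, only direct or
+// static invokes that take the fast path but still lack either a direct code
+// pointer or a direct method pointer are treated as using Method*; interface
+// invokes never are.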
+bool MIRGraph::InvokeUsesMethodStar(MIR* mir)
+{
+  InvokeType type;
+  Instruction::Code opcode = mir->dalvikInsn.opcode;
+  switch (opcode) {
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
+      type = kStatic;
+      break;
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+      type = kDirect;
+      break;
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      type = kVirtual;
+      break;
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE:
+      return false;
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_SUPER:
+      type = kSuper;
+      break;
+    default:
+      LOG(WARNING) << "Unexpected invoke op: " << opcode;
+      return false;
+  }
+  DexCompilationUnit m_unit(cu_);
+  MethodReference target_method(cu_->dex_file, mir->dalvikInsn.vB);
+  int vtable_idx;
+  uintptr_t direct_code;
+  uintptr_t direct_method;
+  uint32_t current_offset = static_cast<uint32_t>(current_offset_);
+  bool fast_path =
+      cu_->compiler_driver->ComputeInvokeInfo(&m_unit, current_offset,
+                                              type, target_method,
+                                              vtable_idx,
+                                              direct_code, direct_method,
+                                              false) &&
+                                              !(cu_->enable_debug & (1 << kDebugSlowInvokePath));
+  return (((type == kDirect) || (type == kStatic)) &&
+          fast_path && ((direct_code == 0) || (direct_method == 0)));
+}
+
+/*
+ * Count uses, weighting by loop nesting depth.  This code only
+ * counts explicitly used s_regs.  A later phase will add implicit
+ * counts for things such as Method*, null-checked references, etc.
+ */
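+/*
+ * Concretely, each explicit use bumps raw_use_counts_ by one, while
+ * use_counts_ grows by 1 << min(nesting_depth, 16): a use at loop nesting
+ * depth 2 therefore contributes a weight of 4.
+ */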
+bool MIRGraph::CountUses(struct BasicBlock* bb)
+{
+  if (bb->block_type != kDalvikByteCode) {
+    return false;
+  }
+  for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
+    if (mir->ssa_rep == NULL) {
+      continue;
+    }
+    uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth));
+    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+      int s_reg = mir->ssa_rep->uses[i];
+      raw_use_counts_.Increment(s_reg);
+      use_counts_.Put(s_reg, use_counts_.Get(s_reg) + (1 << weight));
+    }
+    if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
+      int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+      // Implicit use of Method*?
+      if (df_attributes & DF_UMS) {
+        /*
+         * Some invokes will not use Method* - need to perform test similar
+         * to that found in GenInvoke() to decide whether to count refs
+         * for Method* on invoke-class opcodes.
+         * TODO: refactor for common test here, save results for GenInvoke
+         */
+        bool uses_method_star = true;
+        if ((df_attributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
+            !(df_attributes & DF_NON_NULL_RET)) {
+          uses_method_star &= InvokeUsesMethodStar(mir);
+        }
+        if (uses_method_star) {
+          raw_use_counts_.Increment(method_sreg_);
+          use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + (1 << weight));
+        }
+      }
+    }
+  }
+  return false;
+}
+
+void MIRGraph::MethodUseCount()
+{
+  // Now that the number of SSA registers is known, resize the lists.
+  int num_ssa_regs = GetNumSSARegs();
+  use_counts_.Resize(num_ssa_regs + 32);
+  raw_use_counts_.Resize(num_ssa_regs + 32);
+  // Initialize list
+  for (int i = 0; i < num_ssa_regs; i++) {
+    use_counts_.Insert(0);
+    raw_use_counts_.Insert(0);
+  }
+  if (cu_->disable_opt & (1 << kPromoteRegs)) {
+    return;
+  }
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CountUses(bb);
+  }
+}
+
+/* Verify that the block is listed as a successor by each of its claimed predecessors */
+bool MIRGraph::VerifyPredInfo(BasicBlock* bb)
+{
+  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+
+  while (true) {
+    BasicBlock *pred_bb = iter.Next();
+    if (!pred_bb) break;
+    bool found = false;
+    if (pred_bb->taken == bb) {
+        found = true;
+    } else if (pred_bb->fall_through == bb) {
+        found = true;
+    } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
+      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_block_list.blocks);
+      while (true) {
+        SuccessorBlockInfo *successor_block_info = iterator.Next();
+        if (successor_block_info == NULL) break;
+        BasicBlock *succ_bb = successor_block_info->block;
+        if (succ_bb == bb) {
+            found = true;
+            break;
+        }
+      }
+    }
+    if (found == false) {
+      char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+      GetBlockName(bb, block_name1);
+      GetBlockName(pred_bb, block_name2);
+      DumpCFG("/sdcard/cfg/", false);
+      LOG(FATAL) << "Successor " << block_name1 << "not found from "
+                 << block_name2;
+    }
+  }
+  return true;
+}
+
+void MIRGraph::VerifyDataflow()
+{
+  /* Verify that all blocks are connected as claimed */
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    VerifyPredInfo(bb);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
new file mode 100644
index 0000000..11e100d
--- /dev/null
+++ b/compiler/dex/mir_graph.cc
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/stl_util.h"
+#include "compiler_internals.h"
+#include "dex_file-inl.h"
+#include "leb128.h"
+#include "mir_graph.h"
+
+namespace art {
+
+#define MAX_PATTERN_LEN 5
+
+struct CodePattern {
+  const Instruction::Code opcodes[MAX_PATTERN_LEN];
+  const SpecialCaseHandler handler_code;
+};
+
+static const CodePattern special_patterns[] = {
+  {{Instruction::RETURN_VOID}, kNullMethod},
+  {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
+  {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
+  {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
+  {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
+  {{Instruction::IGET, Instruction::RETURN}, kIGet},
+  {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
+  {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
+  {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
+  {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
+  {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
+  {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
+  {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
+  {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
+  {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
+  {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
+  {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
+  {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
+  {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
+  {{Instruction::RETURN}, kIdentity},
+  {{Instruction::RETURN_OBJECT}, kIdentity},
+  {{Instruction::RETURN_WIDE}, kIdentity},
+};
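+
+/*
+ * Note: each pattern above lists the opcode sequence (up to MAX_PATTERN_LEN
+ * entries) that a small method body is matched against; e.g. a method whose
+ * body is exactly CONST_4 followed by RETURN is a candidate for the
+ * kConstFunction fast path.  The matching is driven by the live_pattern /
+ * dead_pattern bookkeeping set up in InlineMethod() below.
+ */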
+
+const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
+  "Phi",
+  "Copy",
+  "FusedCmplFloat",
+  "FusedCmpgFloat",
+  "FusedCmplDouble",
+  "FusedCmpgDouble",
+  "FusedCmpLong",
+  "Nop",
+  "OpNullCheck",
+  "OpRangeCheck",
+  "OpDivZeroCheck",
+  "Check1",
+  "Check2",
+  "Select",
+};
+
+MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
+    : reg_location_(NULL),
+      compiler_temps_(arena, 6, kGrowableArrayMisc),
+      cu_(cu),
+      ssa_base_vregs_(NULL),
+      ssa_subscripts_(NULL),
+      vreg_to_ssa_map_(NULL),
+      ssa_last_defs_(NULL),
+      is_constant_v_(NULL),
+      constant_values_(NULL),
+      use_counts_(arena, 256, kGrowableArrayMisc),
+      raw_use_counts_(arena, 256, kGrowableArrayMisc),
+      num_reachable_blocks_(0),
+      dfs_order_(NULL),
+      dfs_post_order_(NULL),
+      dom_post_order_traversal_(NULL),
+      i_dom_list_(NULL),
+      def_block_matrix_(NULL),
+      temp_block_v_(NULL),
+      temp_dalvik_register_v_(NULL),
+      temp_ssa_register_v_(NULL),
+      block_list_(arena, 100, kGrowableArrayBlockList),
+      try_block_addr_(NULL),
+      entry_block_(NULL),
+      exit_block_(NULL),
+      cur_block_(NULL),
+      num_blocks_(0),
+      current_code_item_(NULL),
+      current_method_(kInvalidEntry),
+      current_offset_(kInvalidEntry),
+      def_count_(0),
+      opcode_count_(NULL),
+      num_ssa_regs_(0),
+      method_sreg_(0),
+      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
+      checkstats_(NULL),
+      arena_(arena)
+      {
+  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
+}
+
+MIRGraph::~MIRGraph() {
+  STLDeleteElements(&m_units_);
+}
+
+bool MIRGraph::ContentIsInsn(const uint16_t* code_ptr) {
+  uint16_t instr = *code_ptr;
+  Instruction::Code opcode = static_cast<Instruction::Code>(instr & 0xff);
+  /*
+   * Since the low 8-bit in metadata may look like NOP, we need to check
+   * both the low and whole sub-word to determine whether it is code or data.
+   */
+  return (opcode != Instruction::NOP || instr == 0);
+}
+
+/*
+ * Parse an instruction, return the length of the instruction
+ */
+int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction)
+{
+  // Don't parse instruction data
+  if (!ContentIsInsn(code_ptr)) {
+    return 0;
+  }
+
+  const Instruction* instruction = Instruction::At(code_ptr);
+  *decoded_instruction = DecodedInstruction(instruction);
+
+  return instruction->SizeInCodeUnits();
+}
+
+
+/* Split an existing block from the specified code offset into two */
+BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
+                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p)
+{
+  MIR* insn = orig_block->first_mir_insn;
+  while (insn) {
+    if (insn->offset == code_offset) break;
+    insn = insn->next;
+  }
+  if (insn == NULL) {
+    LOG(FATAL) << "Break split failed";
+  }
+  BasicBlock *bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  block_list_.Insert(bottom_block);
+
+  bottom_block->start_offset = code_offset;
+  bottom_block->first_mir_insn = insn;
+  bottom_block->last_mir_insn = orig_block->last_mir_insn;
+
+  /* If this block was terminated by a return, the flag needs to go with the bottom block */
+  bottom_block->terminated_by_return = orig_block->terminated_by_return;
+  orig_block->terminated_by_return = false;
+
+  /* Add it to the quick lookup cache */
+  block_map_.Put(bottom_block->start_offset, bottom_block);
+
+  /* Handle the taken path */
+  bottom_block->taken = orig_block->taken;
+  if (bottom_block->taken) {
+    orig_block->taken = NULL;
+    bottom_block->taken->predecessors->Delete(orig_block);
+    bottom_block->taken->predecessors->Insert(bottom_block);
+  }
+
+  /* Handle the fallthrough path */
+  bottom_block->fall_through = orig_block->fall_through;
+  orig_block->fall_through = bottom_block;
+  bottom_block->predecessors->Insert(orig_block);
+  if (bottom_block->fall_through) {
+    bottom_block->fall_through->predecessors->Delete(orig_block);
+    bottom_block->fall_through->predecessors->Insert(bottom_block);
+  }
+
+  /* Handle the successor list */
+  if (orig_block->successor_block_list.block_list_type != kNotUsed) {
+    bottom_block->successor_block_list = orig_block->successor_block_list;
+    orig_block->successor_block_list.block_list_type = kNotUsed;
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_block_list.blocks);
+    while (true) {
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
+      if (successor_block_info == NULL) break;
+      BasicBlock *bb = successor_block_info->block;
+      bb->predecessors->Delete(orig_block);
+      bb->predecessors->Insert(bottom_block);
+    }
+  }
+
+  orig_block->last_mir_insn = insn->prev;
+
+  insn->prev->next = NULL;
+  insn->prev = NULL;
+  /*
+   * Update the immediate predecessor block pointer so that outgoing edges
+   * can be applied to the proper block.
+   */
+  if (immed_pred_block_p) {
+    DCHECK_EQ(*immed_pred_block_p, orig_block);
+    *immed_pred_block_p = bottom_block;
+  }
+  return bottom_block;
+}
+
+/*
+ * Given a code offset, find out the block that starts with it. If the offset
+ * is in the middle of an existing block, split it into two.  If immed_pred_block_p
+ * is non-null and points to the block being split, update *immed_pred_block_p to
+ * point to the bottom block so that outgoing edges can be set up properly
+ * (by the caller)
+ * Utilizes a map for fast lookup of the typical cases.
+ */
+BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
+                                BasicBlock** immed_pred_block_p)
+{
+  BasicBlock* bb;
+  unsigned int i;
+  SafeMap<unsigned int, BasicBlock*>::iterator it;
+
+  it = block_map_.find(code_offset);
+  if (it != block_map_.end()) {
+    return it->second;
+  } else if (!create) {
+    return NULL;
+  }
+
+  if (split) {
+    for (i = 0; i < block_list_.Size(); i++) {
+      bb = block_list_.Get(i);
+      if (bb->block_type != kDalvikByteCode) continue;
+      /* Check if a branch jumps into the middle of an existing block */
+      if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
+          (code_offset <= bb->last_mir_insn->offset)) {
+        BasicBlock *new_bb = SplitBlock(code_offset, bb, bb == *immed_pred_block_p ?
+                                       immed_pred_block_p : NULL);
+        return new_bb;
+      }
+    }
+  }
+
+  /* Create a new one */
+  bb = NewMemBB(kDalvikByteCode, num_blocks_++);
+  block_list_.Insert(bb);
+  bb->start_offset = code_offset;
+  block_map_.Put(bb->start_offset, bb);
+  return bb;
+}
+
+/* Identify code range in try blocks and set up the empty catch blocks */
+void MIRGraph::ProcessTryCatchBlocks()
+{
+  int tries_size = current_code_item_->tries_size_;
+  int offset;
+
+  if (tries_size == 0) {
+    return;
+  }
+
+  for (int i = 0; i < tries_size; i++) {
+    const DexFile::TryItem* pTry =
+        DexFile::GetTryItems(*current_code_item_, i);
+    int start_offset = pTry->start_addr_;
+    int end_offset = start_offset + pTry->insn_count_;
+    for (offset = start_offset; offset < end_offset; offset++) {
+      try_block_addr_->SetBit(offset);
+    }
+  }
+
+  // Iterate over each of the handlers to enqueue the empty Catch blocks
+  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
+  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+  for (uint32_t idx = 0; idx < handlers_size; idx++) {
+    CatchHandlerIterator iterator(handlers_ptr);
+    for (; iterator.HasNext(); iterator.Next()) {
+      uint32_t address = iterator.GetHandlerAddress();
+      FindBlock(address, false /* split */, true /* create */,
+                /* immed_pred_block_p */ NULL);
+    }
+    handlers_ptr = iterator.EndDataPointer();
+  }
+}
+
+/* Process instructions with the kBranch flag */
+BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                       int flags, const uint16_t* code_ptr,
+                                       const uint16_t* code_end)
+{
+  int target = cur_offset;
+  switch (insn->dalvikInsn.opcode) {
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+      target += insn->dalvikInsn.vA;
+      break;
+    case Instruction::IF_EQ:
+    case Instruction::IF_NE:
+    case Instruction::IF_LT:
+    case Instruction::IF_GE:
+    case Instruction::IF_GT:
+    case Instruction::IF_LE:
+      cur_block->conditional_branch = true;
+      target += insn->dalvikInsn.vC;
+      break;
+    case Instruction::IF_EQZ:
+    case Instruction::IF_NEZ:
+    case Instruction::IF_LTZ:
+    case Instruction::IF_GEZ:
+    case Instruction::IF_GTZ:
+    case Instruction::IF_LEZ:
+      cur_block->conditional_branch = true;
+      target += insn->dalvikInsn.vB;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
+  }
+  BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
+                                      /* immed_pred_block_p */ &cur_block);
+  cur_block->taken = taken_block;
+  taken_block->predecessors->Insert(cur_block);
+
+  /* Always terminate the current block for conditional branches */
+  if (flags & Instruction::kContinue) {
+    BasicBlock *fallthrough_block = FindBlock(cur_offset + width,
+                                             /*
+                                              * If the method is processed
+                                              * in sequential order from the
+                                              * beginning, we don't need to
+                                              * specify split for continue
+                                              * blocks. However, this
+                                              * routine can be called by
+                                              * compileLoop, which starts
+                                              * parsing the method from an
+                                              * arbitrary address in the
+                                              * method body.
+                                              */
+                                             true,
+                                             /* create */
+                                             true,
+                                             /* immed_pred_block_p */
+                                             &cur_block);
+    cur_block->fall_through = fallthrough_block;
+    fallthrough_block->predecessors->Insert(cur_block);
+  } else if (code_ptr < code_end) {
+    /* Create a fallthrough block for real instructions (incl. NOP) */
+    if (ContentIsInsn(code_ptr)) {
+      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
+                /* immed_pred_block_p */ NULL);
+    }
+  }
+  return cur_block;
+}
+
+/* Process instructions with the kSwitch flag */
+void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                int flags)
+{
+  const uint16_t* switch_data =
+      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
+  int size;
+  const int* keyTable;
+  const int* target_table;
+  int i;
+  int first_key;
+
+  /*
+   * Packed switch data format:
+   *  ushort ident = 0x0100   magic value
+   *  ushort size             number of entries in the table
+   *  int first_key           first (and lowest) switch case value
+   *  int targets[size]       branch targets, relative to switch opcode
+   *
+   * Total size is (4+size*2) 16-bit code units.
+   */
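+  // Illustrative example: a packed-switch payload with size = 3 and
+  // first_key = 10 covers cases 10, 11 and 12; their 32-bit relative targets
+  // occupy switch_data[4..5], [6..7] and [8..9] respectively.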
+  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
+    DCHECK_EQ(static_cast<int>(switch_data[0]),
+              static_cast<int>(Instruction::kPackedSwitchSignature));
+    size = switch_data[1];
+    first_key = switch_data[2] | (switch_data[3] << 16);
+    target_table = reinterpret_cast<const int*>(&switch_data[4]);
+    keyTable = NULL;        // Make the compiler happy
+  /*
+   * Sparse switch data format:
+   *  ushort ident = 0x0200   magic value
+   *  ushort size             number of entries in the table; > 0
+   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
+   *  int targets[size]       branch targets, relative to switch opcode
+   *
+   * Total size is (2+size*4) 16-bit code units.
+   */
+  } else {
+    DCHECK_EQ(static_cast<int>(switch_data[0]),
+              static_cast<int>(Instruction::kSparseSwitchSignature));
+    size = switch_data[1];
+    keyTable = reinterpret_cast<const int*>(&switch_data[2]);
+    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
+    first_key = 0;   // To make the compiler happy
+  }
+
+  if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+    LOG(FATAL) << "Successor block list already in use: "
+               << static_cast<int>(cur_block->successor_block_list.block_list_type);
+  }
+  cur_block->successor_block_list.block_list_type =
+      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+      kPackedSwitch : kSparseSwitch;
+  cur_block->successor_block_list.blocks =
+      new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
+
+  for (i = 0; i < size; i++) {
+    BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
+                                      /* create */ true, /* immed_pred_block_p */ &cur_block);
+    SuccessorBlockInfo *successor_block_info =
+        static_cast<SuccessorBlockInfo*>(arena_->NewMem(sizeof(SuccessorBlockInfo), false,
+                                                        ArenaAllocator::kAllocSuccessor));
+    successor_block_info->block = case_block;
+    successor_block_info->key =
+        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+        first_key + i : keyTable[i];
+    cur_block->successor_block_list.blocks->Insert(successor_block_info);
+    case_block->predecessors->Insert(cur_block);
+  }
+
+  /* Fall-through case */
+  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
+                                           /* create */ true, /* immed_pred_block_p */ NULL);
+  cur_block->fall_through = fallthrough_block;
+  fallthrough_block->predecessors->Insert(cur_block);
+}
+
+/* Process instructions with the kThrow flag */
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                      int flags, ArenaBitVector* try_block_addr,
+                                      const uint16_t* code_ptr, const uint16_t* code_end)
+{
+  bool in_try_block = try_block_addr->IsBitSet(cur_offset);
+
+  /* In try block */
+  if (in_try_block) {
+    CatchHandlerIterator iterator(*current_code_item_, cur_offset);
+
+    if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+      LOG(FATAL) << "Successor block list already in use: "
+                 << static_cast<int>(cur_block->successor_block_list.block_list_type);
+    }
+
+    cur_block->successor_block_list.block_list_type = kCatch;
+    cur_block->successor_block_list.blocks =
+        new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);
+
+    for (; iterator.HasNext(); iterator.Next()) {
+      BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
+                                         false /* create */, NULL /* immed_pred_block_p */);
+      catch_block->catch_entry = true;
+      if (kIsDebugBuild) {
+        catches_.insert(catch_block->start_offset);
+      }
+      SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+          (arena_->NewMem(sizeof(SuccessorBlockInfo), false, ArenaAllocator::kAllocSuccessor));
+      successor_block_info->block = catch_block;
+      successor_block_info->key = iterator.GetHandlerTypeIndex();
+      cur_block->successor_block_list.blocks->Insert(successor_block_info);
+      catch_block->predecessors->Insert(cur_block);
+    }
+  } else {
+    BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
+    cur_block->taken = eh_block;
+    block_list_.Insert(eh_block);
+    eh_block->start_offset = cur_offset;
+    eh_block->predecessors->Insert(cur_block);
+  }
+
+  if (insn->dalvikInsn.opcode == Instruction::THROW) {
+    cur_block->explicit_throw = true;
+    if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) {
+      // Force creation of new block following THROW via side-effect
+      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
+                /* immed_pred_block_p */ NULL);
+    }
+    if (!in_try_block) {
+      // Don't split a THROW that can't rethrow - we're done.
+      return cur_block;
+    }
+  }
+
+  /*
+   * Split the potentially-throwing instruction into two parts.
+   * The first half will be a pseudo-op that captures the exception
+   * edges and terminates the basic block.  It always falls through.
+   * Then, create a new basic block that begins with the throwing instruction
+   * (minus exceptions).  Note: this new basic block must NOT be entered into
+   * the block_map.  If the potentially-throwing instruction is the target of a
+   * future branch, we need to find the check pseudo half.  The new
+   * basic block containing the work portion of the instruction should
+   * only be entered via fallthrough from the block containing the
+   * pseudo exception edge MIR.  Note also that this new block is
+   * not automatically terminated after the work portion, and may
+   * contain following instructions.
+   */
+  BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  block_list_.Insert(new_block);
+  new_block->start_offset = insn->offset;
+  cur_block->fall_through = new_block;
+  new_block->predecessors->Insert(cur_block);
+  MIR* new_insn = static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocMIR));
+  *new_insn = *insn;
+  insn->dalvikInsn.opcode =
+      static_cast<Instruction::Code>(kMirOpCheck);
+  // Associate the two halves
+  insn->meta.throw_insn = new_insn;
+  new_insn->meta.throw_insn = insn;
+  AppendMIR(new_block, new_insn);
+  return new_block;
+}
+
+/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
+void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+                           InvokeType invoke_type, uint32_t class_def_idx,
+                           uint32_t method_idx, jobject class_loader, const DexFile& dex_file)
+{
+  current_code_item_ = code_item;
+  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
+  current_method_ = m_units_.size();
+  current_offset_ = 0;
+  // TODO: will need to snapshot stack image and use that as the mir context identification.
+  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
+                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags));
+  const uint16_t* code_ptr = current_code_item_->insns_;
+  const uint16_t* code_end =
+      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
+
+  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
+  block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
+  // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
+  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
+  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);
+
+  // If this is the first method, set up default entry and exit blocks.
+  if (current_method_ == 0) {
+    DCHECK(entry_block_ == NULL);
+    DCHECK(exit_block_ == NULL);
+    DCHECK(num_blocks_ == 0);
+    entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
+    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
+    block_list_.Insert(entry_block_);
+    block_list_.Insert(exit_block_);
+    // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
+    cu_->dex_file = &dex_file;
+    cu_->class_def_idx = class_def_idx;
+    cu_->method_idx = method_idx;
+    cu_->access_flags = access_flags;
+    cu_->invoke_type = invoke_type;
+    cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+    cu_->num_ins = current_code_item_->ins_size_;
+    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
+    cu_->num_outs = current_code_item_->outs_size_;
+    cu_->num_dalvik_registers = current_code_item_->registers_size_;
+    cu_->insns = current_code_item_->insns_;
+    cu_->code_item = current_code_item_;
+  } else {
+    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
+    /*
+     * Will need to manage storage for ins & outs, push previous state and update
+     * insert point.
+     */
+  }
+
+  /* Current block to record parsed instructions */
+  BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  DCHECK_EQ(current_offset_, 0);
+  cur_block->start_offset = current_offset_;
+  block_list_.Insert(cur_block);
+  /* Add first block to the fast lookup cache */
+  // FIXME: block map needs association with offset/method pair rather than just offset
+  block_map_.Put(cur_block->start_offset, cur_block);
+  // FIXME: this needs to insert at the insert point rather than entry block.
+  entry_block_->fall_through = cur_block;
+  cur_block->predecessors->Insert(entry_block_);
+
+  /* Identify code range in try blocks and set up the empty catch blocks */
+  ProcessTryCatchBlocks();
+
+  /* Set up for simple method detection */
+  int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
+  bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
+  bool* dead_pattern =
+      static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_patterns, true,
+                                        ArenaAllocator::kAllocMisc));
+  SpecialCaseHandler special_case = kNoHandler;
+  // FIXME - wire this up
+  (void)special_case;
+  int pattern_pos = 0;
+
+  /* Parse all instructions and put them into containing basic blocks */
+  while (code_ptr < code_end) {
+    MIR *insn = static_cast<MIR *>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocMIR));
+    insn->offset = current_offset_;
+    insn->m_unit_index = current_method_;
+    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
+    insn->width = width;
+    Instruction::Code opcode = insn->dalvikInsn.opcode;
+    if (opcode_count_ != NULL) {
+      opcode_count_[static_cast<int>(opcode)]++;
+    }
+
+    /* Terminate when the data section is seen */
+    if (width == 0) {
+      break;
+    }
+
+    /* Possible simple method? */
+    if (live_pattern) {
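+      // Compare the current opcode against position pattern_pos of every pattern that is still
+      // alive; any mismatch permanently retires that pattern via dead_pattern.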
+      live_pattern = false;
+      special_case = kNoHandler;
+      for (int i = 0; i < num_patterns; i++) {
+        if (!dead_pattern[i]) {
+          if (special_patterns[i].opcodes[pattern_pos] == opcode) {
+            live_pattern = true;
+            special_case = special_patterns[i].handler_code;
+          } else {
+            dead_pattern[i] = true;
+          }
+        }
+      }
+      pattern_pos++;
+    }
+
+    AppendMIR(cur_block, insn);
+
+    code_ptr += width;
+    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+
+    int df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
+
+    if (df_flags & DF_HAS_DEFS) {
+      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
+    }
+
+    if (flags & Instruction::kBranch) {
+      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
+                                   width, flags, code_ptr, code_end);
+    } else if (flags & Instruction::kReturn) {
+      cur_block->terminated_by_return = true;
+      cur_block->fall_through = exit_block_;
+      exit_block_->predecessors->Insert(cur_block);
+      /*
+       * Terminate the current block if there are instructions
+       * afterwards.
+       */
+      if (code_ptr < code_end) {
+        /*
+         * Create a fallthrough block for real instructions
+         * (incl. NOP).
+         */
+        if (ContentIsInsn(code_ptr)) {
+            FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
+                      /* immed_pred_block_p */ NULL);
+        }
+      }
+    } else if (flags & Instruction::kThrow) {
+      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
+                                  code_ptr, code_end);
+    } else if (flags & Instruction::kSwitch) {
+      ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
+    }
+    current_offset_ += width;
+    BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */ false,
+                                       /* immed_pred_block_p */ NULL);
+    if (next_block) {
+      /*
+       * The next instruction could be the target of a previously parsed
+       * forward branch so a block is already created. If the current
+       * instruction is not an unconditional branch, connect them through
+       * the fall-through link.
+       */
+      DCHECK(cur_block->fall_through == NULL ||
+             cur_block->fall_through == next_block ||
+             cur_block->fall_through == exit_block_);
+
+      if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
+        cur_block->fall_through = next_block;
+        next_block->predecessors->Insert(cur_block);
+      }
+      cur_block = next_block;
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/1_post_parse_cfg/", true);
+  }
+
+  if (cu_->verbose) {
+    DumpMIRGraph();
+  }
+}
+
+void MIRGraph::ShowOpcodeStats()
+{
+  DCHECK(opcode_count_ != NULL);
+  LOG(INFO) << "Opcode Count";
+  for (int i = 0; i < kNumPackedOpcodes; i++) {
+    if (opcode_count_[i] != 0) {
+      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
+                << " " << opcode_count_[i];
+    }
+  }
+}
+
+// TODO: use a configurable base prefix, and adjust callers to supply pass name.
+/* Dump the CFG into a DOT graph */
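+/* The emitted .dot file can be rendered with Graphviz, e.g. "dot -Tpng <file>.dot -o cfg.png". */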
+void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks)
+{
+  FILE* file;
+  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
+  ReplaceSpecialChars(fname);
+  fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
+                      GetEntryBlock()->fall_through->start_offset);
+  file = fopen(fname.c_str(), "w");
+  if (file == NULL) {
+    return;
+  }
+  fprintf(file, "digraph G {\n");
+
+  fprintf(file, "  rankdir=TB\n");
+
+  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
+  int idx;
+
+  for (idx = 0; idx < num_blocks; idx++) {
+    int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
+    BasicBlock *bb = GetBasicBlock(block_idx);
+    if (bb == NULL) break;
+    if (bb->block_type == kDead) continue;
+    if (bb->block_type == kEntryBlock) {
+      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
+    } else if (bb->block_type == kExitBlock) {
+      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
+    } else if (bb->block_type == kDalvikByteCode) {
+      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
+              bb->start_offset, bb->id);
+      const MIR *mir;
+        fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
+                bb->first_mir_insn ? " | " : " ");
+        for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+            int opcode = mir->dalvikInsn.opcode;
+            fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
+                    mir->ssa_rep ? GetDalvikDisassembly(mir) :
+                    (opcode < kMirOpFirst) ?  Instruction::Name(mir->dalvikInsn.opcode) :
+                    extended_mir_op_names_[opcode - kMirOpFirst],
+                    (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
+                    (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
+                    mir->next ? " | " : " ");
+        }
+        fprintf(file, "  }\"];\n\n");
+    } else if (bb->block_type == kExceptionHandling) {
+      char block_name[BLOCK_NAME_LEN];
+
+      GetBlockName(bb, block_name);
+      fprintf(file, "  %s [shape=invhouse];\n", block_name);
+    }
+
+    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+
+    if (bb->taken) {
+      GetBlockName(bb, block_name1);
+      GetBlockName(bb->taken, block_name2);
+      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
+              block_name1, block_name2);
+    }
+    if (bb->fall_through) {
+      GetBlockName(bb, block_name1);
+      GetBlockName(bb->fall_through, block_name2);
+      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
+    }
+
+    if (bb->successor_block_list.block_list_type != kNotUsed) {
+      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
+              bb->start_offset, bb->id,
+              (bb->successor_block_list.block_list_type == kCatch) ?
+               "Mrecord" : "record");
+      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
+
+      int succ_id = 0;
+      while (true) {
+        if (successor_block_info == NULL) break;
+
+        BasicBlock *dest_block = successor_block_info->block;
+        SuccessorBlockInfo *next_successor_block_info = iterator.Next();
+
+        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
+                succ_id++,
+                successor_block_info->key,
+                dest_block->start_offset,
+                (next_successor_block_info != NULL) ? " | " : " ");
+
+        successor_block_info = next_successor_block_info;
+      }
+      fprintf(file, "  }\"];\n\n");
+
+      GetBlockName(bb, block_name1);
+      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
+              block_name1, bb->start_offset, bb->id);
+
+      if (bb->successor_block_list.block_list_type == kPackedSwitch ||
+          bb->successor_block_list.block_list_type == kSparseSwitch) {
+
+        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
+
+        succ_id = 0;
+        while (true) {
+          SuccessorBlockInfo *successor_block_info = iter.Next();
+          if (successor_block_info == NULL) break;
+
+          BasicBlock *dest_block = successor_block_info->block;
+
+          GetBlockName(dest_block, block_name2);
+          fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
+                  bb->id, succ_id++, block_name2);
+        }
+      }
+    }
+    fprintf(file, "\n");
+
+    if (cu_->verbose) {
+      /* Display the dominator tree */
+      GetBlockName(bb, block_name1);
+      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
+              block_name1, block_name1);
+      if (bb->i_dom) {
+        GetBlockName(bb->i_dom, block_name2);
+        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
+      }
+    }
+  }
+  fprintf(file, "}\n");
+  fclose(file);
+}
+
+/* Insert an MIR instruction to the end of a basic block */
+void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir)
+{
+  if (bb->first_mir_insn == NULL) {
+    DCHECK(bb->last_mir_insn == NULL);
+    bb->last_mir_insn = bb->first_mir_insn = mir;
+    mir->prev = mir->next = NULL;
+  } else {
+    bb->last_mir_insn->next = mir;
+    mir->prev = bb->last_mir_insn;
+    mir->next = NULL;
+    bb->last_mir_insn = mir;
+  }
+}
+
+/* Insert an MIR instruction to the head of a basic block */
+void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir)
+{
+  if (bb->first_mir_insn == NULL) {
+    DCHECK(bb->last_mir_insn == NULL);
+    bb->last_mir_insn = bb->first_mir_insn = mir;
+    mir->prev = mir->next = NULL;
+  } else {
+    bb->first_mir_insn->prev = mir;
+    mir->next = bb->first_mir_insn;
+    mir->prev = NULL;
+    bb->first_mir_insn = mir;
+  }
+}
+
+/* Insert a MIR instruction after the specified MIR */
+void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
+{
+  new_mir->prev = current_mir;
+  new_mir->next = current_mir->next;
+  current_mir->next = new_mir;
+
+  if (new_mir->next) {
+    /* Is not the last MIR in the block */
+    new_mir->next->prev = new_mir;
+  } else {
+    /* Is the last MIR in the block */
+    bb->last_mir_insn = new_mir;
+  }
+}
+
+char* MIRGraph::GetDalvikDisassembly(const MIR* mir)
+{
+  DecodedInstruction insn = mir->dalvikInsn;
+  std::string str;
+  int flags = 0;
+  int opcode = insn.opcode;
+  char* ret;
+  bool nop = false;
+  SSARepresentation* ssa_rep = mir->ssa_rep;
+  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
+  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+
+  // Handle special cases.
+  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
+    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
+    str.append(": ");
+    // Recover the original Dex instruction
+    insn = mir->meta.throw_insn->dalvikInsn;
+    ssa_rep = mir->meta.throw_insn->ssa_rep;
+    defs = ssa_rep->num_defs;
+    uses = ssa_rep->num_uses;
+    opcode = insn.opcode;
+  } else if (opcode == kMirOpNop) {
+    str.append("[");
+    insn.opcode = mir->meta.original_opcode;
+    opcode = mir->meta.original_opcode;
+    nop = true;
+  }
+
+  if (opcode >= kMirOpFirst) {
+    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
+  } else {
+    dalvik_format = Instruction::FormatOf(insn.opcode);
+    flags = Instruction::FlagsOf(insn.opcode);
+    str.append(Instruction::Name(insn.opcode));
+  }
+
+  if (opcode == kMirOpPhi) {
+    int* incoming = reinterpret_cast<int*>(insn.vB);
+    str.append(StringPrintf(" %s = (%s",
+               GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
+               GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
+    str.append(StringPrintf(":%d",incoming[0]));
+    int i;
+    for (i = 1; i < uses; i++) {
+      str.append(StringPrintf(", %s:%d",
+                              GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
+                              incoming[i]));
+    }
+    str.append(")");
+  } else if ((flags & Instruction::kBranch) != 0) {
+    // For branches, decode the instructions to print out the branch targets.
+    int offset = 0;
+    switch (dalvik_format) {
+      case Instruction::k21t:
+        str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
+        offset = insn.vB;
+        break;
+      case Instruction::k22t:
+        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
+                   GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
+        offset = insn.vC;
+        break;
+      case Instruction::k10t:
+      case Instruction::k20t:
+      case Instruction::k30t:
+        offset = insn.vA;
+        break;
+      default:
+        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+    }
+    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
+                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
+  } else {
+    // For invoke-style formats, treat wide regs as a pair of singles.
+    bool show_singles = ((dalvik_format == Instruction::k35c) ||
+                         (dalvik_format == Instruction::k3rc));
+    if (defs != 0) {
+      str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str()));
+      if (uses != 0) {
+        str.append(", ");
+      }
+    }
+    for (int i = 0; i < uses; i++) {
+      str.append(
+          StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str()));
+      if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+        // For the listing, skip the high sreg.
+        i++;
+      }
+      if (i != (uses - 1)) {
+        str.append(",");
+      }
+    }
+    switch (dalvik_format) {
+      case Instruction::k11n: // Add one immediate from vB
+      case Instruction::k21s:
+      case Instruction::k31i:
+      case Instruction::k21h:
+        str.append(StringPrintf(", #%d", insn.vB));
+        break;
+      case Instruction::k51l: // Add one wide immediate
+        str.append(StringPrintf(", #%lld", insn.vB_wide));
+        break;
+      case Instruction::k21c: // One register, one string/type/method index
+      case Instruction::k31c:
+        str.append(StringPrintf(", index #%d", insn.vB));
+        break;
+      case Instruction::k22c: // Two registers, one string/type/method index
+        str.append(StringPrintf(", index #%d", insn.vC));
+        break;
+      case Instruction::k22s: // Add one immediate from vC
+      case Instruction::k22b:
+        str.append(StringPrintf(", #%d", insn.vC));
+        break;
+      default:
+        ; // Nothing left to print
+    }
+  }
+  if (nop) {
+    str.append("]--optimized away");
+  }
+  int length = str.length() + 1;
+  ret = static_cast<char*>(arena_->NewMem(length, false, ArenaAllocator::kAllocDFInfo));
+  strncpy(ret, str.c_str(), length);
+  return ret;
+}
+
+/* Turn method name into a legal Linux file name */
+void MIRGraph::ReplaceSpecialChars(std::string& str)
+{
+  static const struct { const char before; const char after; } match[] =
+      {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'},
+       {'(','@'}, {')','@'}, {'<','='}, {'>','='}};
+  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
+    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
+  }
+}
+
+std::string MIRGraph::GetSSAName(int ssa_reg)
+{
+  // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to
+  //       the arena. We should be smarter and just place straight into the arena, or compute the
+  //       value more lazily.
+  return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+}
+
+// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
+std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only)
+{
+  if (reg_location_ == NULL) {
+    // Pre-SSA - just use the standard name
+    return GetSSAName(ssa_reg);
+  }
+  if (IsConst(reg_location_[ssa_reg])) {
+    if (!singles_only && reg_location_[ssa_reg].wide) {
+      return StringPrintf("v%d_%d#0x%llx", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
+                          ConstantValueWide(reg_location_[ssa_reg]));
+    } else {
+      return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg),GetSSASubscript(ssa_reg),
+                          ConstantValue(reg_location_[ssa_reg]));
+    }
+  } else {
+    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+  }
+}
+
+void MIRGraph::GetBlockName(BasicBlock* bb, char* name)
+{
+  switch (bb->block_type) {
+    case kEntryBlock:
+      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
+      break;
+    case kExitBlock:
+      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
+      break;
+    case kDalvikByteCode:
+      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
+      break;
+    case kExceptionHandling:
+      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
+               bb->id);
+      break;
+    default:
+      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
+      break;
+  }
+}
+
+const char* MIRGraph::GetShortyFromTargetIdx(int target_idx)
+{
+  // FIXME: use current code unit for inline support.
+  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
+  return cu_->dex_file->GetShorty(method_id.proto_idx_);
+}
+
+/* Debug Utility - dump a compilation unit */
+void MIRGraph::DumpMIRGraph()
+{
+  BasicBlock* bb;
+  const char* block_type_names[] = {
+    "Entry Block",
+    "Code Block",
+    "Exit Block",
+    "Exception Handling",
+    "Catch Block"
+  };
+
+  LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  LOG(INFO) << cu_->insns << " insns";
+  LOG(INFO) << GetNumBlocks() << " blocks in total";
+  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
+
+  while (true) {
+    bb = iterator.Next();
+    if (bb == NULL) break;
+    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
+        bb->id,
+        block_type_names[bb->block_type],
+        bb->start_offset,
+        bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
+        bb->last_mir_insn ? "" : " empty");
+    if (bb->taken) {
+      LOG(INFO) << "  Taken branch: block " << bb->taken->id
+                << "(0x" << std::hex << bb->taken->start_offset << ")";
+    }
+    if (bb->fall_through) {
+      LOG(INFO) << "  Fallthrough : block " << bb->fall_through->id
+                << " (0x" << std::hex << bb->fall_through->start_offset << ")";
+    }
+  }
+}
+
+/*
+ * Build an array of location records for the incoming arguments.
+ * Note: one location record per word of arguments, with dummy
+ * high-word loc for wide arguments.  Also pull up any following
+ * MOVE_RESULT and incorporate it into the invoke.
+ */
+CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
+                                  bool is_range)
+{
+  CallInfo* info = static_cast<CallInfo*>(arena_->NewMem(sizeof(CallInfo), true,
+                                                         ArenaAllocator::kAllocMisc));
+  MIR* move_result_mir = FindMoveResult(bb, mir);
+  if (move_result_mir == NULL) {
+    info->result.location = kLocInvalid;
+  } else {
+    info->result = GetRawDest(move_result_mir);
+    move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
+    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+  }
+  info->num_arg_words = mir->ssa_rep->num_uses;
+  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
+      (arena_->NewMem(sizeof(RegLocation) * info->num_arg_words, false,
+                      ArenaAllocator::kAllocMisc));
+  for (int i = 0; i < info->num_arg_words; i++) {
+    info->args[i] = GetRawSrc(mir, i);
+  }
+  info->opt_flags = mir->optimization_flags;
+  info->type = type;
+  info->is_range = is_range;
+  info->index = mir->dalvikInsn.vB;
+  info->offset = mir->offset;
+  return info;
+}
+
+// Allocate a new basic block.
+BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id)
+{
+  BasicBlock* bb = static_cast<BasicBlock*>(arena_->NewMem(sizeof(BasicBlock), true,
+                                                           ArenaAllocator::kAllocBB));
+  bb->block_type = block_type;
+  bb->id = block_id;
+  // TUNING: better estimate of the exit block predecessors?
+  bb->predecessors = new (arena_)
+      GrowableArray<BasicBlock*>(arena_, (block_type == kExitBlock) ? 2048 : 2, kGrowableArrayPredecessors);
+  bb->successor_block_list.block_list_type = kNotUsed;
+  block_id_map_.Put(block_id, block_id);
+  return bb;
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
new file mode 100644
index 0000000..2b1c21f
--- /dev/null
+++ b/compiler/dex/mir_graph.h
@@ -0,0 +1,670 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_MIRGRAPH_H_
+#define ART_SRC_COMPILER_DEX_MIRGRAPH_H_
+
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "compiler_ir.h"
+#include "arena_bit_vector.h"
+#include "growable_array.h"
+
+namespace art {
+
+enum DataFlowAttributePos {
+  kUA = 0,
+  kUB,
+  kUC,
+  kAWide,
+  kBWide,
+  kCWide,
+  kDA,
+  kIsMove,
+  kSetsConst,
+  kFormat35c,
+  kFormat3rc,
+  kNullCheckSrc0,        // Null check of uses[0].
+  kNullCheckSrc1,        // Null check of uses[1].
+  kNullCheckSrc2,        // Null check of uses[2].
+  kNullCheckOut0,        // Null check out outgoing arg0.
+  kDstNonNull,           // May assume dst is non-null.
+  kRetNonNull,           // May assume retval is non-null.
+  kNullTransferSrc0,     // Object copy src[0] -> dst.
+  kNullTransferSrcN,     // Phi null check state transfer.
+  kRangeCheckSrc1,       // Range check of uses[1].
+  kRangeCheckSrc2,       // Range check of uses[2].
+  kRangeCheckSrc3,       // Range check of uses[3].
+  kFPA,
+  kFPB,
+  kFPC,
+  kCoreA,
+  kCoreB,
+  kCoreC,
+  kRefA,
+  kRefB,
+  kRefC,
+  kUsesMethodStar,       // Implicit use of Method*.
+};
+
+#define DF_NOP                  0
+#define DF_UA                   (1 << kUA)
+#define DF_UB                   (1 << kUB)
+#define DF_UC                   (1 << kUC)
+#define DF_A_WIDE               (1 << kAWide)
+#define DF_B_WIDE               (1 << kBWide)
+#define DF_C_WIDE               (1 << kCWide)
+#define DF_DA                   (1 << kDA)
+#define DF_IS_MOVE              (1 << kIsMove)
+#define DF_SETS_CONST           (1 << kSetsConst)
+#define DF_FORMAT_35C           (1 << kFormat35c)
+#define DF_FORMAT_3RC           (1 << kFormat3rc)
+#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
+#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
+#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
+#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
+#define DF_NON_NULL_DST         (1 << kDstNonNull)
+#define DF_NON_NULL_RET         (1 << kRetNonNull)
+#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
+#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
+#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
+#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
+#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
+#define DF_FP_A                 (1 << kFPA)
+#define DF_FP_B                 (1 << kFPB)
+#define DF_FP_C                 (1 << kFPC)
+#define DF_CORE_A               (1 << kCoreA)
+#define DF_CORE_B               (1 << kCoreB)
+#define DF_CORE_C               (1 << kCoreC)
+#define DF_REF_A                (1 << kRefA)
+#define DF_REF_B                (1 << kRefB)
+#define DF_REF_C                (1 << kRefC)
+#define DF_UMS                  (1 << kUsesMethodStar)
+
+#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
+
+#define DF_HAS_DEFS             (DF_DA)
+
+#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
+                                 DF_NULL_CHK_1 | \
+                                 DF_NULL_CHK_2 | \
+                                 DF_NULL_CHK_OUT0)
+
+#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
+                                 DF_RANGE_CHK_2 | \
+                                 DF_RANGE_CHK_3)
+
+#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
+                                 DF_HAS_RANGE_CHKS)
+
+#define DF_A_IS_REG             (DF_UA | DF_DA)
+#define DF_B_IS_REG             (DF_UB)
+#define DF_C_IS_REG             (DF_UC)
+#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
+#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
+
+enum OatMethodAttributes {
+  kIsLeaf,            // Method is leaf.
+  kHasLoop,           // Method contains simple loop.
+};
+
+#define METHOD_IS_LEAF          (1 << kIsLeaf)
+#define METHOD_HAS_LOOP         (1 << kHasLoop)
+
+// Minimum field size to contain Dalvik v_reg number.
+#define VREG_NUM_WIDTH 16
+
+#define INVALID_SREG (-1)
+#define INVALID_VREG (0xFFFFU)
+#define INVALID_REG (0xFF)
+#define INVALID_OFFSET (0xDEADF00FU)
+
+/* SSA encodings for special registers */
+#define SSA_METHOD_BASEREG (-2)
+/* First compiler temp basereg, grows smaller */
+#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
+
+#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
+#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
+#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
+#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
+#define MIR_INLINED                     (1 << kMIRInlined)
+#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
+#define MIR_CALLEE                      (1 << kMIRCallee)
+#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
+#define MIR_DUP                         (1 << kMIRDup)
+
+#define BLOCK_NAME_LEN 80
+
+/*
+ * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
+ * it is useful to have compiler-generated temporary registers and have them treated
+ * in the same manner as dx-generated virtual registers.  This struct records the SSA
+ * name of compiler-introduced temporaries.
+ */
+struct CompilerTemp {
+  int s_reg;
+};
+
+// When debug option enabled, records effectiveness of null and range check elimination.
+struct Checkstats {
+  int null_checks;
+  int null_checks_eliminated;
+  int range_checks;
+  int range_checks_eliminated;
+};
+
+// Dataflow attributes of a basic block.
+struct BasicBlockDataFlow {
+  ArenaBitVector* use_v;
+  ArenaBitVector* def_v;
+  ArenaBitVector* live_in_v;
+  ArenaBitVector* phi_v;
+  int* vreg_to_ssa_map;
+  ArenaBitVector* ending_null_check_v;
+};
+
+/*
+ * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
+ * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
+ * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses (v2/v3, v4/v5).
+ * Following SSA renaming, this is the primary struct used by code generators to locate
+ * operand and result registers.  This is a somewhat confusing and unhelpful convention that
+ * we may want to revisit in the future.
+ */
+struct SSARepresentation {
+  int num_uses;
+  int* uses;
+  bool* fp_use;
+  int num_defs;
+  int* defs;
+  bool* fp_def;
+};
+
+/*
+ * The Mid-level Intermediate Representation (MIR) node, which may be largely considered a
+ * wrapper around a Dalvik bytecode instruction.
+ */
+struct MIR {
+  DecodedInstruction dalvikInsn;
+  unsigned int width;
+  unsigned int offset;
+  int m_unit_index;               // Index of the method from which this MIR was included.
+  MIR* prev;
+  MIR* next;
+  SSARepresentation* ssa_rep;
+  int optimization_flags;
+  union {
+    // Establish link between two halves of throwing instructions.
+    MIR* throw_insn;
+    // Saved opcode for NOP'd MIRs
+    Instruction::Code original_opcode;
+  } meta;
+};
+
+struct SuccessorBlockInfo;
+
+struct BasicBlock {
+  int id;
+  int dfs_id;
+  bool visited;
+  bool hidden;
+  bool catch_entry;
+  bool explicit_throw;
+  bool conditional_branch;
+  bool terminated_by_return;        // Block ends with a Dalvik return opcode.
+  bool dominates_return;            // Is a member of return extended basic block.
+  uint16_t start_offset;
+  uint16_t nesting_depth;
+  BBType block_type;
+  MIR* first_mir_insn;
+  MIR* last_mir_insn;
+  BasicBlock* fall_through;
+  BasicBlock* taken;
+  BasicBlock* i_dom;                // Immediate dominator.
+  BasicBlockDataFlow* data_flow_info;
+  GrowableArray<BasicBlock*>* predecessors;
+  ArenaBitVector* dominators;
+  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
+  ArenaBitVector* dom_frontier;     // Dominance frontier.
+  struct {                          // For one-to-many successors like
+    BlockListType block_list_type;  // switch and exception handling.
+    GrowableArray<SuccessorBlockInfo*>* blocks;
+  } successor_block_list;
+};
+
+/*
+ * The "blocks" field in "successor_block_list" points to an array of elements with the type
+ * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For swtich
+ * blocks, key is the case value.
+ */
+// TODO: make class with placement new.
+struct SuccessorBlockInfo {
+  BasicBlock* block;
+  int key;
+};
+
+/*
+ * Whereas an SSA name describes a definition of a Dalvik vreg, the RegLocation describes
+ * the type of an SSA name (and can also be used by code generators to record where the
+ * value is located, i.e. physical register, frame, spill, etc.).  For each SSA name (SReg)
+ * there is a RegLocation.
+ * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
+ * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
+ */
+struct RegLocation {
+  RegLocationType location:3;
+  unsigned wide:1;
+  unsigned defined:1;   // Do we know the type?
+  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
+  unsigned fp:1;        // Floating point?
+  unsigned core:1;      // Non-floating point?
+  unsigned ref:1;       // Something GC cares about.
+  unsigned high_word:1; // High word of pair?
+  unsigned home:1;      // Does this represent the home location?
+  uint8_t low_reg;      // First physical register.
+  uint8_t high_reg;     // 2nd physical register (if wide).
+  int32_t s_reg_low;    // SSA name for low Dalvik word.
+  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
+                        // and consolidate usage w/ s_reg_low.
+};
+
+/*
+ * Collection of information describing an invoke, and the destination of
+ * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
+ * more efficient invoke code generation.
+ */
+struct CallInfo {
+  int num_arg_words;    // Note: word count, not arg count.
+  RegLocation* args;    // One for each word of arguments.
+  RegLocation result;   // Eventual target of MOVE_RESULT.
+  int opt_flags;
+  InvokeType type;
+  uint32_t dex_idx;
+  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
+  uintptr_t direct_code;
+  uintptr_t direct_method;
+  RegLocation target;    // Target of following move_result.
+  bool skip_this;
+  bool is_range;
+  int offset;            // Dalvik offset.
+};
+
+
+const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+                             INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG};
+
+class MIRGraph {
+ public:
+  MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
+  ~MIRGraph();
+
+  /*
+   * Parse dex method and add MIR at current insert point.  The method is assigned an id
+   * equal to its index in the m_units_ array.
+   */
+  void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+                    InvokeType invoke_type, uint32_t class_def_idx,
+                    uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
+
+  /* Find existing block */
+  BasicBlock* FindBlock(unsigned int code_offset) {
+    return FindBlock(code_offset, false, false, NULL);
+  }
+
+  const uint16_t* GetCurrentInsns() const {
+    return current_code_item_->insns_;
+  }
+
+  const uint16_t* GetInsns(int m_unit_index) const {
+    return m_units_[m_unit_index]->GetCodeItem()->insns_;
+  }
+
+  int GetNumBlocks() const {
+    return num_blocks_;
+  }
+
+  ArenaBitVector* GetTryBlockAddr() const {
+    return try_block_addr_;
+  }
+
+  BasicBlock* GetEntryBlock() const {
+    return entry_block_;
+  }
+
+  BasicBlock* GetExitBlock() const {
+    return exit_block_;
+  }
+
+  BasicBlock* GetBasicBlock(int block_id) const {
+    return block_list_.Get(block_id);
+  }
+
+  size_t GetBasicBlockListCount() const {
+    return block_list_.Size();
+  }
+
+  GrowableArray<BasicBlock*>* GetBlockList() {
+    return &block_list_;
+  }
+
+  GrowableArray<int>* GetDfsOrder() {
+    return dfs_order_;
+  }
+
+  GrowableArray<int>* GetDfsPostOrder() {
+    return dfs_post_order_;
+  }
+
+  GrowableArray<int>* GetDomPostOrder() {
+    return dom_post_order_traversal_;
+  }
+
+  int GetDefCount() const {
+    return def_count_;
+  }
+
+  ArenaAllocator* GetArena() {
+    return arena_;
+  }
+
+  void EnableOpcodeCounting() {
+    opcode_count_ = static_cast<int*>(arena_->NewMem(kNumPackedOpcodes * sizeof(int), true,
+                                      ArenaAllocator::kAllocMisc));
+  }
+
+  void ShowOpcodeStats();
+
+  DexCompilationUnit* GetCurrentDexCompilationUnit() const {
+    return m_units_[current_method_];
+  }
+
+  void DumpCFG(const char* dir_prefix, bool all_blocks);
+
+  void BuildRegLocations();
+
+  void DumpRegLocTable(RegLocation* table, int count);
+
+  void BasicBlockOptimization();
+
+  bool IsConst(int32_t s_reg) const {
+    return is_constant_v_->IsBitSet(s_reg);
+  }
+
+  bool IsConst(RegLocation loc) const {
+    return (IsConst(loc.orig_sreg));
+  }
+
+  int32_t ConstantValue(RegLocation loc) const {
+    DCHECK(IsConst(loc));
+    return constant_values_[loc.orig_sreg];
+  }
+
+  int32_t ConstantValue(int32_t s_reg) const {
+    DCHECK(IsConst(s_reg));
+    return constant_values_[s_reg];
+  }
+
+  int64_t ConstantValueWide(RegLocation loc) const {
+    DCHECK(IsConst(loc));
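+    // Reassemble the 64-bit constant from two consecutive 32-bit slots: low word at orig_sreg,
+    // high word at orig_sreg + 1.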
+    return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
+        Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
+  }
+
+  bool IsConstantNullRef(RegLocation loc) const {
+    return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
+  }
+
+  int GetNumSSARegs() const {
+    return num_ssa_regs_;
+  }
+
+  void SetNumSSARegs(int new_num) {
+    num_ssa_regs_ = new_num;
+  }
+
+  unsigned int GetNumReachableBlocks() const {
+    return num_reachable_blocks_;
+  }
+
+  int GetUseCount(int vreg) const {
+    return use_counts_.Get(vreg);
+  }
+
+  int GetRawUseCount(int vreg) const {
+    return raw_use_counts_.Get(vreg);
+  }
+
+  int GetSSASubscript(int ssa_reg) const {
+    return ssa_subscripts_->Get(ssa_reg);
+  }
+
+  RegLocation GetRawSrc(MIR* mir, int num)
+  {
+    DCHECK(num < mir->ssa_rep->num_uses);
+    RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
+    return res;
+  }
+
+  RegLocation GetRawDest(MIR* mir)
+  {
+    DCHECK_GT(mir->ssa_rep->num_defs, 0);
+    RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
+    return res;
+  }
+
+  RegLocation GetDest(MIR* mir)
+  {
+    RegLocation res = GetRawDest(mir);
+    DCHECK(!res.wide);
+    return res;
+  }
+
+  RegLocation GetSrc(MIR* mir, int num)
+  {
+    RegLocation res = GetRawSrc(mir, num);
+    DCHECK(!res.wide);
+    return res;
+  }
+
+  RegLocation GetDestWide(MIR* mir)
+  {
+    RegLocation res = GetRawDest(mir);
+    DCHECK(res.wide);
+    return res;
+  }
+
+  RegLocation GetSrcWide(MIR* mir, int low)
+  {
+    RegLocation res = GetRawSrc(mir, low);
+    DCHECK(res.wide);
+    return res;
+  }
+
+  RegLocation GetBadLoc() {
+    return bad_loc;
+  }
+
+  int GetMethodSReg() {
+    return method_sreg_;
+  }
+
+  bool MethodIsLeaf() {
+    return attributes_ & METHOD_IS_LEAF;
+  }
+
+  RegLocation GetRegLocation(int index) {
+    DCHECK((index >= 0) && (index < num_ssa_regs_));
+    return reg_location_[index];
+  }
+
+  RegLocation GetMethodLoc() {
+    return reg_location_[method_sreg_];
+  }
+
+  void BasicBlockCombine();
+  void CodeLayout();
+  void DumpCheckStats();
+  void PropagateConstants();
+  MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
+  int SRegToVReg(int ssa_reg) const;
+  void VerifyDataflow();
+  void MethodUseCount();
+  void SSATransformation();
+  void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
+  void NullCheckElimination();
+  bool SetFp(int index, bool is_fp);
+  bool SetCore(int index, bool is_core);
+  bool SetRef(int index, bool is_ref);
+  bool SetWide(int index, bool is_wide);
+  bool SetHigh(int index, bool is_high);
+  void AppendMIR(BasicBlock* bb, MIR* mir);
+  void PrependMIR(BasicBlock* bb, MIR* mir);
+  void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
+  char* GetDalvikDisassembly(const MIR* mir);
+  void ReplaceSpecialChars(std::string& str);
+  std::string GetSSAName(int ssa_reg);
+  std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
+  void GetBlockName(BasicBlock* bb, char* name);
+  const char* GetShortyFromTargetIdx(int);
+  void DumpMIRGraph();
+  CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
+  BasicBlock* NewMemBB(BBType block_type, int block_id);
+
+  /*
+   * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
+   * we can verify that all catch entries have native PC entries.
+   */
+   std::set<uint32_t> catches_;
+
+   // TODO: make these private.
+   RegLocation* reg_location_;                         // Map SSA names to location.
+   GrowableArray<CompilerTemp*> compiler_temps_;
+   SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
+
+   static const int oat_data_flow_attributes_[kMirOpLast];
+   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
+
+ private:
+
+   int FindCommonParent(int block1, int block2);
+   void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
+                          const ArenaBitVector* src2);
+   void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
+                        ArenaBitVector* live_in_v, int dalvik_reg_id);
+   void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
+   void CompilerInitializeSSAConversion();
+   bool DoSSAConversion(BasicBlock* bb);
+   bool InvokeUsesMethodStar(MIR* mir);
+   int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction);
+   bool ContentIsInsn(const uint16_t* code_ptr);
+   BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block,
+                          BasicBlock** immed_pred_block_p);
+   BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create,
+                         BasicBlock** immed_pred_block_p);
+   void ProcessTryCatchBlocks();
+   BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                int flags, const uint16_t* code_ptr, const uint16_t* code_end);
+   void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags);
+   BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                               int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
+                               const uint16_t* code_end);
+   int AddNewSReg(int v_reg);
+   void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
+   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
+   void DataFlowSSAFormat35C(MIR* mir);
+   void DataFlowSSAFormat3RC(MIR* mir);
+   bool FindLocalLiveIn(BasicBlock* bb);
+   void ClearAllVisitedFlags();
+   bool CountUses(struct BasicBlock* bb);
+   bool InferTypeAndSize(BasicBlock* bb);
+   bool VerifyPredInfo(BasicBlock* bb);
+   BasicBlock* NeedsVisit(BasicBlock* bb);
+   BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
+   void MarkPreOrder(BasicBlock* bb);
+   void RecordDFSOrders(BasicBlock* bb);
+   void ComputeDFSOrders();
+   void ComputeDefBlockMatrix();
+   void ComputeDomPostOrderTraversal(BasicBlock* bb);
+   void ComputeDominators();
+   void InsertPhiNodes();
+   void DoDFSPreOrderSSARename(BasicBlock* block);
+   void SetConstant(int32_t ssa_reg, int value);
+   void SetConstantWide(int ssa_reg, int64_t value);
+   int GetSSAUseCount(int s_reg);
+   bool BasicBlockOpt(BasicBlock* bb);
+   bool EliminateNullChecks(BasicBlock* bb);
+   void NullCheckEliminationInit(BasicBlock* bb);
+   bool BuildExtendedBBList(struct BasicBlock* bb);
+   bool FillDefBlockMatrix(BasicBlock* bb);
+   void InitializeDominationInfo(BasicBlock* bb);
+   bool ComputeblockIDom(BasicBlock* bb);
+   bool ComputeBlockDominators(BasicBlock* bb);
+   bool SetDominators(BasicBlock* bb);
+   bool ComputeBlockLiveIns(BasicBlock* bb);
+   bool InsertPhiNodeOperands(BasicBlock* bb);
+   bool ComputeDominanceFrontier(BasicBlock* bb);
+   void DoConstantPropogation(BasicBlock* bb);
+   void CountChecks(BasicBlock* bb);
+   bool CombineBlocks(BasicBlock* bb);
+
+   CompilationUnit* const cu_;
+   GrowableArray<int>* ssa_base_vregs_;
+   GrowableArray<int>* ssa_subscripts_;
+   // Map original Dalvik virtual reg i to the current SSA name.
+   int* vreg_to_ssa_map_;            // length == method->registers_size
+   int* ssa_last_defs_;              // length == method->registers_size
+   ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
+   int* constant_values_;            // length == num_ssa_reg
+   // Use counts of ssa names.
+   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
+   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
+   unsigned int num_reachable_blocks_;
+   GrowableArray<int>* dfs_order_;
+   GrowableArray<int>* dfs_post_order_;
+   GrowableArray<int>* dom_post_order_traversal_;
+   int* i_dom_list_;
+   ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
+   ArenaBitVector* temp_block_v_;
+   ArenaBitVector* temp_dalvik_register_v_;
+   ArenaBitVector* temp_ssa_register_v_;  // num_ssa_regs.
+   static const int kInvalidEntry = -1;
+   GrowableArray<BasicBlock*> block_list_;
+   ArenaBitVector* try_block_addr_;
+   BasicBlock* entry_block_;
+   BasicBlock* exit_block_;
+   BasicBlock* cur_block_;
+   int num_blocks_;
+   const DexFile::CodeItem* current_code_item_;
+   SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache.
+   std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
+   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
+   std::vector<MIRLocation> method_stack_;        // Include stack
+   int current_method_;
+   int current_offset_;
+   int def_count_;                                // Used to estimate size of ssa name storage.
+   int* opcode_count_;                            // Dex opcode coverage stats.
+   int num_ssa_regs_;                             // Number of names following SSA transformation.
+   std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces".
+   int method_sreg_;
+   unsigned int attributes_;
+   Checkstats* checkstats_;
+   ArenaAllocator* arena_;
+};
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_MIRGRAPH_H_
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
new file mode 100644
index 0000000..6b8f3f0
--- /dev/null
+++ b/compiler/dex/mir_optimization.cc
@@ -0,0 +1,893 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "local_value_numbering.h"
+#include "dataflow_iterator-inl.h"
+
+namespace art {
+
+static unsigned int Predecessors(BasicBlock* bb)
+{
+  return bb->predecessors->Size();
+}
+
+/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
+void MIRGraph::SetConstant(int32_t ssa_reg, int value)
+{
+  is_constant_v_->SetBit(ssa_reg);
+  constant_values_[ssa_reg] = value;
+}
+
+void MIRGraph::SetConstantWide(int ssa_reg, int64_t value)
+{
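+  // A wide constant occupies two consecutive SSA names: the low word at ssa_reg and the high
+  // word at ssa_reg + 1.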
+  is_constant_v_->SetBit(ssa_reg);
+  constant_values_[ssa_reg] = Low32Bits(value);
+  constant_values_[ssa_reg + 1] = High32Bits(value);
+}
+
+void MIRGraph::DoConstantPropogation(BasicBlock* bb)
+{
+  MIR* mir;
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+
+    DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+    if (!(df_attributes & DF_HAS_DEFS)) continue;
+
+    /* Handle instructions that set up constants directly */
+    if (df_attributes & DF_SETS_CONST) {
+      if (df_attributes & DF_DA) {
+        int32_t vB = static_cast<int32_t>(d_insn->vB);
+        switch (d_insn->opcode) {
+          case Instruction::CONST_4:
+          case Instruction::CONST_16:
+          case Instruction::CONST:
+            SetConstant(mir->ssa_rep->defs[0], vB);
+            break;
+          case Instruction::CONST_HIGH16:
+            SetConstant(mir->ssa_rep->defs[0], vB << 16);
+            break;
+          case Instruction::CONST_WIDE_16:
+          case Instruction::CONST_WIDE_32:
+            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
+            break;
+          case Instruction::CONST_WIDE:
+            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
+            break;
+          case Instruction::CONST_WIDE_HIGH16:
+            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
+            break;
+          default:
+            break;
+        }
+      }
+      /* Handle constants copied by move instructions */
+    } else if (df_attributes & DF_IS_MOVE) {
+      int i;
+
+      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
+        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
+      }
+      /* Move a register holding a constant to another register */
+      if (i == mir->ssa_rep->num_uses) {
+        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
+        if (df_attributes & DF_A_WIDE) {
+          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
+        }
+      }
+    }
+  }
+  /* TODO: implement code to handle arithmetic operations */
+}
+
+void MIRGraph::PropagateConstants()
+{
+  is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
+  constant_values_ = static_cast<int*>(arena_->NewMem(sizeof(int) * GetNumSSARegs(), true,
+                                                      ArenaAllocator::kAllocDFInfo));
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    DoConstantPropogation(bb);
+  }
+}
+
+/* Advance to next strictly dominated MIR node in an extended basic block */
+static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir)
+{
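+  // Walk past the end of the current block only when the fall-through successor has exactly
+  // one predecessor, keeping the traversal within the extended basic block.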
+  BasicBlock* bb = *p_bb;
+  if (mir != NULL) {
+    mir = mir->next;
+    if (mir == NULL) {
+      bb = bb->fall_through;
+      if ((bb == NULL) || Predecessors(bb) != 1) {
+        mir = NULL;
+      } else {
+        *p_bb = bb;
+        mir = bb->first_mir_insn;
+      }
+    }
+  }
+  return mir;
+}
+
+/*
+ * To be used at an invoke mir.  If the logically next mir node represents
+ * a move-result, return it.  Else, return NULL.  If a move-result exists,
+ * it is required to immediately follow the invoke with no intervening
+ * opcodes or incoming arcs.  However, if the result of the invoke is not
+ * used, a move-result may not be present.
+ */
+MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir)
+{
+  BasicBlock* tbb = bb;
+  mir = AdvanceMIR(&tbb, mir);
+  while (mir != NULL) {
+    int opcode = mir->dalvikInsn.opcode;
+    if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
+        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
+        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
+      break;
+    }
+    // Keep going if pseudo op, otherwise terminate
+    if (opcode < kNumPackedOpcodes) {
+      mir = NULL;
+    } else {
+      mir = AdvanceMIR(&tbb, mir);
+    }
+  }
+  return mir;
+}
+
+static BasicBlock* NextDominatedBlock(BasicBlock* bb)
+{
+  if (bb->block_type == kDead) {
+    return NULL;
+  }
+  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
+      || (bb->block_type == kExitBlock));
+  bb = bb->fall_through;
+  if (bb == NULL || (Predecessors(bb) != 1)) {
+    return NULL;
+  }
+  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
+  return bb;
+}
+
+static MIR* FindPhi(BasicBlock* bb, int ssa_name)
+{
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
+      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+        if (mir->ssa_rep->uses[i] == ssa_name) {
+          return mir;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+static SelectInstructionKind SelectKind(MIR* mir)
+{
+  switch (mir->dalvikInsn.opcode) {
+    case Instruction::MOVE:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_OBJECT_16:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_OBJECT_FROM16:
+      return kSelectMove;
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+      return kSelectConst;
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+      return kSelectGoto;
+    default:
+      break;
+  }
+  return kSelectNone;
+}
+
+int MIRGraph::GetSSAUseCount(int s_reg)
+{
+  return raw_use_counts_.Get(s_reg);
+}
+
+
+/* Do some MIR-level extended basic block optimizations */
+bool MIRGraph::BasicBlockOpt(BasicBlock* bb)
+{
+  if (bb->block_type == kDead) {
+    return true;
+  }
+  int num_temps = 0;
+  LocalValueNumbering local_valnum(cu_);
+  while (bb != NULL) {
+    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+      // TUNING: use the returned value number for CSE.
+      local_valnum.GetValueNumber(mir);
+      // Look for interesting opcodes, skip otherwise
+      Instruction::Code opcode = mir->dalvikInsn.opcode;
+      switch (opcode) {
+        case Instruction::CMPL_FLOAT:
+        case Instruction::CMPL_DOUBLE:
+        case Instruction::CMPG_FLOAT:
+        case Instruction::CMPG_DOUBLE:
+        case Instruction::CMP_LONG:
+          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
+            // Bitcode doesn't allow this optimization.
+            break;
+          }
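+          // Fuse this compare with an immediately following zero-test branch (e.g. CMPL_FLOAT
+          // followed by IF_LTZ) into a single extended MIR op, NOP'ing the original compare.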
+          if (mir->next != NULL) {
+            MIR* mir_next = mir->next;
+            Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
+            ConditionCode ccode = kCondNv;
+            switch (br_opcode) {
+              case Instruction::IF_EQZ:
+                ccode = kCondEq;
+                break;
+              case Instruction::IF_NEZ:
+                ccode = kCondNe;
+                break;
+              case Instruction::IF_LTZ:
+                ccode = kCondLt;
+                break;
+              case Instruction::IF_GEZ:
+                ccode = kCondGe;
+                break;
+              case Instruction::IF_GTZ:
+                ccode = kCondGt;
+                break;
+              case Instruction::IF_LEZ:
+                ccode = kCondLe;
+                break;
+              default:
+                break;
+            }
+            // Make sure result of cmp is used by next insn and nowhere else
+            if ((ccode != kCondNv) &&
+                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
+                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
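+              // Fuse: the conditional branch becomes a single compare-and-branch MIR that
+              // takes over the compare's operands; the compare itself becomes a nop below.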
+              mir_next->dalvikInsn.arg[0] = ccode;
+              switch (opcode) {
+                case Instruction::CMPL_FLOAT:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
+                  break;
+                case Instruction::CMPL_DOUBLE:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
+                  break;
+                case Instruction::CMPG_FLOAT:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
+                  break;
+                case Instruction::CMPG_DOUBLE:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
+                  break;
+                case Instruction::CMP_LONG:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
+                  break;
+                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
+              }
+              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
+              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
+              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
+              mir_next->ssa_rep->num_defs = 0;
+              mir->ssa_rep->num_uses = 0;
+              mir->ssa_rep->num_defs = 0;
+            }
+          }
+          break;
+        case Instruction::GOTO:
+        case Instruction::GOTO_16:
+        case Instruction::GOTO_32:
+        case Instruction::IF_EQ:
+        case Instruction::IF_NE:
+        case Instruction::IF_LT:
+        case Instruction::IF_GE:
+        case Instruction::IF_GT:
+        case Instruction::IF_LE:
+        case Instruction::IF_EQZ:
+        case Instruction::IF_NEZ:
+        case Instruction::IF_LTZ:
+        case Instruction::IF_GEZ:
+        case Instruction::IF_GTZ:
+        case Instruction::IF_LEZ:
+          if (bb->taken->dominates_return) {
+            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
+            if (cu_->verbose) {
+              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;
+            }
+          }
+          break;
+        default:
+          break;
+      }
+      // Is this the select pattern?
+      // TODO: flesh out support for Mips and X86.  NOTE: llvm's select op doesn't quite work here.
+      // TUNING: expand to support IF_xx compare & branches
+      if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) &&
+          ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
+          (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
+        BasicBlock* ft = bb->fall_through;
+        DCHECK(ft != NULL);
+        BasicBlock* ft_ft = ft->fall_through;
+        BasicBlock* ft_tk = ft->taken;
+
+        BasicBlock* tk = bb->taken;
+        DCHECK(tk != NULL);
+        BasicBlock* tk_ft = tk->fall_through;
+        BasicBlock* tk_tk = tk->taken;
+
+        /*
+         * In the select pattern, the taken edge goes to a block that unconditionally
+         * transfers to the rejoin block and the fall_through edge goes to a block that
+         * unconditionally falls through to the rejoin block.
+         */
+        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
+          /*
+           * Okay - we have the basic diamond shape.  At the very least, we can eliminate the
+           * suspend check on the taken-taken branch back to the join point.
+           */
+          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
+            tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
+          }
+          // Are the block bodies something we can handle?
+          if ((ft->first_mir_insn == ft->last_mir_insn) &&
+              (tk->first_mir_insn != tk->last_mir_insn) &&
+              (tk->first_mir_insn->next == tk->last_mir_insn) &&
+              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
+              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
+              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
+              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
+            // Almost there.  Are the instructions targeting the same vreg?
+            MIR* if_true = tk->first_mir_insn;
+            MIR* if_false = ft->first_mir_insn;
+            // It's possible that the target of the select isn't used - skip those (rare) cases.
+            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
+            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+              /*
+               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
+               * Phi node in the merge block and delete it (while using the SSA name
+               * of the merge as the target of the SELECT).  Delete both the taken and
+               * fallthrough blocks, and set fallthrough to the merge block.
+               * NOTE: not updating other dataflow info (no longer used at this point).
+               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
+               */
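+              // e.g. "r = (x == 0) ? a : b" produces this diamond; the IF_EQZ is rewritten
+              // into a single kMirOpSelect, the taken block is killed and the fall-through's
+              // instructions become nops.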
+              if (opcode == Instruction::IF_NEZ) {
+                // Normalize.
+                MIR* tmp_mir = if_true;
+                if_true = if_false;
+                if_false = tmp_mir;
+              }
+              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
+              bool const_form = (SelectKind(if_true) == kSelectConst);
+              if (SelectKind(if_true) == kSelectMove) {
+                if (IsConst(if_true->ssa_rep->uses[0]) &&
+                    IsConst(if_false->ssa_rep->uses[0])) {
+                  const_form = true;
+                  if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
+                  if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
+                }
+              }
+              if (const_form) {
+                // The "true" value goes into the select's vB.
+                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
+                // The "false" value goes into the select's vC.
+                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
+              } else {
+                DCHECK_EQ(SelectKind(if_true), kSelectMove);
+                DCHECK_EQ(SelectKind(if_false), kSelectMove);
+                int* src_ssa =
+                    static_cast<int*>(arena_->NewMem(sizeof(int) * 3, false,
+                                                     ArenaAllocator::kAllocDFInfo));
+                src_ssa[0] = mir->ssa_rep->uses[0];
+                src_ssa[1] = if_true->ssa_rep->uses[0];
+                src_ssa[2] = if_false->ssa_rep->uses[0];
+                mir->ssa_rep->uses = src_ssa;
+                mir->ssa_rep->num_uses = 3;
+              }
+              mir->ssa_rep->num_defs = 1;
+              mir->ssa_rep->defs =
+                  static_cast<int*>(arena_->NewMem(sizeof(int) * 1, false,
+                                                   ArenaAllocator::kAllocDFInfo));
+              mir->ssa_rep->fp_def =
+                  static_cast<bool*>(arena_->NewMem(sizeof(bool) * 1, false,
+                                                    ArenaAllocator::kAllocDFInfo));
+              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
+              // Match type of uses to def.
+              mir->ssa_rep->fp_use =
+                  static_cast<bool*>(arena_->NewMem(sizeof(bool) * mir->ssa_rep->num_uses, false,
+                                                    ArenaAllocator::kAllocDFInfo));
+              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
+              }
+              /*
+               * There is usually a Phi node in the join block for our two cases.  If the
+               * Phi node only contains our two cases as input, we will use the result
+               * SSA name of the Phi node as our select result and delete the Phi.  If
+               * the Phi node has more than two operands, we will arbitrarily use the SSA
+               * name of the "true" path, delete the SSA name of the "false" path from the
+               * Phi node (and fix up the incoming arc list).
+               */
+              if (phi->ssa_rep->num_uses == 2) {
+                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
+                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              } else {
+                int dead_def = if_false->ssa_rep->defs[0];
+                int live_def = if_true->ssa_rep->defs[0];
+                mir->ssa_rep->defs[0] = live_def;
+                int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+                  if (phi->ssa_rep->uses[i] == live_def) {
+                    incoming[i] = bb->id;
+                  }
+                }
+                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+                  if (phi->ssa_rep->uses[i] == dead_def) {
+                    int last_slot = phi->ssa_rep->num_uses - 1;
+                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
+                    incoming[i] = incoming[last_slot];
+                  }
+                }
+              }
+              phi->ssa_rep->num_uses--;
+              bb->taken = NULL;
+              tk->block_type = kDead;
+              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
+                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              }
+            }
+          }
+        }
+      }
+    }
+    bb = NextDominatedBlock(bb);
+  }
+
+  if (num_temps > cu_->num_compiler_temps) {
+    cu_->num_compiler_temps = num_temps;
+  }
+  return true;
+}
+
+void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb)
+{
+  if (bb->data_flow_info != NULL) {
+    bb->data_flow_info->ending_null_check_v =
+        new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapNullCheck);
+  }
+}
+
+/* Collect stats on number of checks removed */
+void MIRGraph::CountChecks(struct BasicBlock* bb)
+{
+  if (bb->data_flow_info != NULL) {
+    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+      if (mir->ssa_rep == NULL) {
+        continue;
+      }
+      int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+      if (df_attributes & DF_HAS_NULL_CHKS) {
+        checkstats_->null_checks++;
+        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
+          checkstats_->null_checks_eliminated++;
+        }
+      }
+      if (df_attributes & DF_HAS_RANGE_CHKS) {
+        checkstats_->range_checks++;
+        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
+          checkstats_->range_checks_eliminated++;
+        }
+      }
+    }
+  }
+}
+
+/* Try to make common case the fallthrough path */
+static bool LayoutBlocks(struct BasicBlock* bb)
+{
+  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
+  if (!bb->explicit_throw) {
+    return false;
+  }
+  BasicBlock* walker = bb;
+  while (true) {
+    // Check termination conditions
+    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
+      break;
+    }
+    BasicBlock* prev = walker->predecessors->Get(0);
+    if (prev->conditional_branch) {
+      if (prev->fall_through == walker) {
+        // Already done - return
+        break;
+      }
+      DCHECK_EQ(walker, prev->taken);
+      // Got one.  Flip it and exit
+      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
+      switch (opcode) {
+        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
+        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
+        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
+        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
+        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
+        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
+        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
+        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
+        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
+        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
+        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
+        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
+        default: LOG(FATAL) << "Unexpected opcode " << opcode;
+      }
+      prev->last_mir_insn->dalvikInsn.opcode = opcode;
+      BasicBlock* t_bb = prev->taken;
+      prev->taken = prev->fall_through;
+      prev->fall_through = t_bb;
+      break;
+    }
+    walker = prev;
+  }
+  return false;
+}
+
+/* Combine any basic blocks terminated by instructions that we now know can't throw */
+bool MIRGraph::CombineBlocks(struct BasicBlock* bb)
+{
+  // Loop here to allow combining a sequence of blocks
+  while (true) {
+    // Check termination conditions
+    if ((bb->first_mir_insn == NULL)
+        || (bb->data_flow_info == NULL)
+        || (bb->block_type == kExceptionHandling)
+        || (bb->block_type == kExitBlock)
+        || (bb->block_type == kDead)
+        || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
+        || (bb->successor_block_list.block_list_type != kNotUsed)
+        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
+      break;
+    }
+
+    // Test the kMirOpCheck instruction
+    MIR* mir = bb->last_mir_insn;
+    // Grab the attributes from the paired opcode
+    MIR* throw_insn = mir->meta.throw_insn;
+    int df_attributes = oat_data_flow_attributes_[throw_insn->dalvikInsn.opcode];
+    bool can_combine = true;
+    if (df_attributes & DF_HAS_NULL_CHKS) {
+      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
+    }
+    if (df_attributes & DF_HAS_RANGE_CHKS) {
+      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
+    }
+    if (!can_combine) {
+      break;
+    }
+    // OK - got one.  Combine
+    BasicBlock* bb_next = bb->fall_through;
+    DCHECK(!bb_next->catch_entry);
+    DCHECK_EQ(Predecessors(bb_next), 1U);
+    MIR* t_mir = bb->last_mir_insn->prev;
+    // Overwrite the kMirOpCheck insn with the paired opcode
+    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
+    *bb->last_mir_insn = *throw_insn;
+    bb->last_mir_insn->prev = t_mir;
+    // Use the successor info from the next block
+    bb->successor_block_list = bb_next->successor_block_list;
+    // Use the ending block linkage from the next block
+    bb->fall_through = bb_next->fall_through;
+    bb->taken->block_type = kDead;  // Kill the unused exception block
+    bb->taken = bb_next->taken;
+    // Include the rest of the instructions
+    bb->last_mir_insn = bb_next->last_mir_insn;
+    /*
+     * If lower-half of pair of blocks to combine contained a return, move the flag
+     * to the newly combined block.
+     */
+    bb->terminated_by_return = bb_next->terminated_by_return;
+
+    /*
+     * NOTE: we aren't updating all dataflow info here.  We should either make sure this pass
+     * happens after all uses of i_dominated/dom_frontier, or update the dataflow info here.
+     */
+
+    // Kill bb_next and remap now-dead id to parent
+    bb_next->block_type = kDead;
+    block_id_map_.Overwrite(bb_next->id, bb->id);
+
+    // Now, loop back and see if we can keep going
+  }
+  return false;
+}
+
+/* Eliminate unnecessary null checks for a basic block. */
+bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb)
+{
+  if (bb->data_flow_info == NULL) return false;
+
+  /*
+   * Set initial state.  Be conservative with catch
+   * blocks and start with no assumptions about null check
+   * status (except for "this").
+   */
+  if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
+    temp_ssa_register_v_->ClearAllBits();
+    if ((cu_->access_flags & kAccStatic) == 0) {
+      // If non-static method, mark "this" as non-null
+      int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
+      temp_ssa_register_v_->SetBit(this_reg);
+    }
+  } else if (bb->predecessors->Size() == 1) {
+    BasicBlock* pred_bb = bb->predecessors->Get(0);
+    temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
+    if (pred_bb->block_type == kDalvikByteCode) {
+      // Check to see if predecessor had an explicit null-check.
+      MIR* last_insn = pred_bb->last_mir_insn;
+      Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
+      if (last_opcode == Instruction::IF_EQZ) {
+        if (pred_bb->fall_through == bb) {
+          // bb is the fall-through of a block ending in IF_EQZ, so the register tested
+          // by the IF_EQZ cannot be null here.
+          temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
+        }
+      } else if (last_opcode == Instruction::IF_NEZ) {
+        if (pred_bb->taken == bb) {
+          // bb is the taken target of a block ending in IF_NEZ, so the register tested
+          // by the IF_NEZ cannot be null here.
+          temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
+        }
+      }
+    }
+  } else {
+    // Starting state is intersection of all incoming arcs
+    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+    BasicBlock* pred_bb = iter.Next();
+    DCHECK(pred_bb != NULL);
+    temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
+    while (true) {
+      pred_bb = iter.Next();
+      if (!pred_bb) break;
+      if ((pred_bb->data_flow_info == NULL) ||
+          (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
+        continue;
+      }
+      temp_ssa_register_v_->Intersect(pred_bb->data_flow_info->ending_null_check_v);
+    }
+  }
+
+  // Walk through the instructions in the block, updating as necessary
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (mir->ssa_rep == NULL) {
+      continue;
+    }
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+
+    // Mark target of NEW* as non-null
+    if (df_attributes & DF_NON_NULL_DST) {
+      temp_ssa_register_v_->SetBit(mir->ssa_rep->defs[0]);
+    }
+
+    // Mark non-null returns from invoke-style NEW*
+    if (df_attributes & DF_NON_NULL_RET) {
+      MIR* next_mir = mir->next;
+      // Next should be a MOVE_RESULT_OBJECT
+      if (next_mir &&
+          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+        // Mark as null checked
+        temp_ssa_register_v_->SetBit(next_mir->ssa_rep->defs[0]);
+      } else {
+        if (next_mir) {
+          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
+        } else if (bb->fall_through) {
+          // Look in next basic block
+          struct BasicBlock* next_bb = bb->fall_through;
+          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
+               tmir = tmir->next) {
+            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
+              continue;
+            }
+            // First non-pseudo should be MOVE_RESULT_OBJECT
+            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+              // Mark as null checked
+              temp_ssa_register_v_->SetBit(tmir->ssa_rep->defs[0]);
+            } else {
+              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
+            }
+            break;
+          }
+        }
+      }
+    }
+
+    /*
+     * Propagate nullcheck state on register copies (including
+     * Phi pseudo copies).  For the latter, the nullcheck state is
+     * the "and" of all the Phi's operands.
+     */
+    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
+      int tgt_sreg = mir->ssa_rep->defs[0];
+      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
+          mir->ssa_rep->num_uses;
+      bool null_checked = true;
+      for (int i = 0; i < operands; i++) {
+        null_checked &= temp_ssa_register_v_->IsBitSet(mir->ssa_rep->uses[i]);
+      }
+      if (null_checked) {
+        temp_ssa_register_v_->SetBit(tgt_sreg);
+      }
+    }
+
+    // Already nullchecked?
+    if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+      int src_idx;
+      if (df_attributes & DF_NULL_CHK_1) {
+        src_idx = 1;
+      } else if (df_attributes & DF_NULL_CHK_2) {
+        src_idx = 2;
+      } else {
+        src_idx = 0;
+      }
+      int src_sreg = mir->ssa_rep->uses[src_idx];
+      if (temp_ssa_register_v_->IsBitSet(src_sreg)) {
+        // Eliminate the null check
+        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+      } else {
+        // Mark s_reg as null-checked
+        temp_ssa_register_v_->SetBit(src_sreg);
+      }
+    }
+  }
+
+  // Did anything change?
+  bool changed = !temp_ssa_register_v_->Equal(bb->data_flow_info->ending_null_check_v);
+  if (changed) {
+    bb->data_flow_info->ending_null_check_v->Copy(temp_ssa_register_v_);
+  }
+  return changed;
+}
+
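+/*
+ * Driver for null check elimination: seed each block's ending_null_check_v, then iterate
+ * EliminateNullChecks over a pre-order DFS until the per-block results stabilize.
+ */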
+void MIRGraph::NullCheckElimination()
+{
+  if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
+    DCHECK(temp_ssa_register_v_ != NULL);
+    AllNodesIterator iter(this, false /* not iterative */);
+    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+      NullCheckEliminationInit(bb);
+    }
+    PreOrderDfsIterator iter2(this, true /* iterative */);
+    bool change = false;
+    for (BasicBlock* bb = iter2.Next(change); bb != NULL; bb = iter2.Next(change)) {
+      change = EliminateNullChecks(bb);
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/4_post_nce_cfg/", false);
+  }
+}
+
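+/* Run CombineBlocks over every block to merge blocks whose terminating checks can no longer throw. */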
+void MIRGraph::BasicBlockCombine()
+{
+  PreOrderDfsIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CombineBlocks(bb);
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/5_post_bbcombine_cfg/", false);
+  }
+}
+
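+/* Run LayoutBlocks over every block so that explicit-throw paths become taken branches. */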
+void MIRGraph::CodeLayout()
+{
+  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
+    VerifyDataflow();
+  }
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    LayoutBlocks(bb);
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/2_post_layout_cfg/", true);
+  }
+}
+
+void MIRGraph::DumpCheckStats()
+{
+  Checkstats* stats =
+      static_cast<Checkstats*>(arena_->NewMem(sizeof(Checkstats), true,
+                                              ArenaAllocator::kAllocDFInfo));
+  checkstats_ = stats;
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CountChecks(bb);
+  }
+  if (stats->null_checks > 0) {
+    float eliminated = static_cast<float>(stats->null_checks_eliminated);
+    float checks = static_cast<float>(stats->null_checks);
+    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
+              << (eliminated/checks) * 100.0 << "%";
+  }
+  if (stats->range_checks > 0) {
+    float eliminated = static_cast<float>(stats->range_checks_eliminated);
+    float checks = static_cast<float>(stats->range_checks);
+    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
+              << (eliminated/checks) * 100.0 << "%";
+  }
+}
+
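+/*
+ * Record bb as the head of an extended basic block, mark its members visited, and set
+ * dominates_return on every member if any member ends in a return.
+ */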
+bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb)
+{
+  if (bb->visited) return false;
+  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
+      || (bb->block_type == kExitBlock))) {
+    // Ignore special blocks
+    bb->visited = true;
+    return false;
+  }
+  // Must be head of extended basic block.
+  BasicBlock* start_bb = bb;
+  extended_basic_blocks_.push_back(bb);
+  bool terminated_by_return = false;
+  // Visit blocks strictly dominated by this head.
+  while (bb != NULL) {
+    bb->visited = true;
+    terminated_by_return |= bb->terminated_by_return;
+    bb = NextDominatedBlock(bb);
+  }
+  if (terminated_by_return) {
+    // This extended basic block contains a return, so mark all members.
+    bb = start_bb;
+    while (bb != NULL) {
+      bb->dominates_return = true;
+      bb = NextDominatedBlock(bb);
+    }
+  }
+  return false;  // Not iterative - return value will be ignored
+}
+
+
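+/* Build the list of extended basic blocks, then run BasicBlockOpt on each head. */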
+void MIRGraph::BasicBlockOptimization()
+{
+  if (!(cu_->disable_opt & (1 << kBBOpt))) {
+    DCHECK_EQ(cu_->num_compiler_temps, 0);
+    ClearAllVisitedFlags();
+    PreOrderDfsIterator iter2(this, false /* not iterative */);
+    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+      BuildExtendedBBList(bb);
+    }
+    // Perform extended basic block optimizations.
+    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
+      BasicBlockOpt(extended_basic_blocks_[i]);
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/6_post_bbo_cfg/", false);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
new file mode 100644
index 0000000..2be1ef4
--- /dev/null
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -0,0 +1,2056 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "object_utils.h"
+
+#include <llvm/ADT/DepthFirstIterator.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/IR/Instruction.h>
+#include <llvm/IR/Instructions.h>
+#include <llvm/IR/Metadata.h>
+#include <llvm/IR/Type.h>
+#include <llvm/Support/Casting.h>
+#include <llvm/Support/InstIterator.h>
+#include <llvm/Support/ToolOutputFile.h>
+
+#include "dex/compiler_internals.h"
+#include "dex/dataflow_iterator-inl.h"
+#include "dex/frontend.h"
+#include "mir_to_gbc.h"
+
+#include "llvm/llvm_compilation_unit.h"
+#include "llvm/utils_llvm.h"
+
+const char* kLabelFormat = "%c0x%x_%d";
+const char kInvalidBlock = 0xff;
+const char kNormalBlock = 'L';
+const char kCatchBlock = 'C';
+
+namespace art {
+
+::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id)
+{
+  return id_to_block_map_.Get(id);
+}
+
+::llvm::Value* MirConverter::GetLLVMValue(int s_reg)
+{
+  return llvm_values_.Get(s_reg);
+}
+
+void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg)
+{
+  // Set vreg for debugging
+  art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::SetVReg;
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  ::llvm::Value* table_slot = irb_->getInt32(v_reg);
+  ::llvm::Value* args[] = { table_slot, val };
+  irb_->CreateCall(func, args);
+}
+
+// Replace the placeholder value with the real definition
+void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg)
+{
+  ::llvm::Value* placeholder = GetLLVMValue(s_reg);
+  if (placeholder == NULL) {
+    // This can happen when an instruction is rewritten due to a verification failure
+    LOG(WARNING) << "Null placeholder";
+    return;
+  }
+  placeholder->replaceAllUsesWith(val);
+  val->takeName(placeholder);
+  llvm_values_.Put(s_reg, val);
+  ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
+  DCHECK(inst != NULL);
+  inst->eraseFromParent();
+}
+
+void MirConverter::DefineValue(::llvm::Value* val, int s_reg)
+{
+  DefineValueOnly(val, s_reg);
+  SetVregOnValue(val, s_reg);
+}
+
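+// Map a Dalvik RegLocation to the corresponding LLVM type
+// (i64/double for wide values; float, jobject or i32 otherwise).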
+::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc)
+{
+  ::llvm::Type* res = NULL;
+  if (loc.wide) {
+    if (loc.fp) {
+      res = irb_->getDoubleTy();
+    } else {
+      res = irb_->getInt64Ty();
+    }
+  } else {
+    if (loc.fp) {
+      res = irb_->getFloatTy();
+    } else if (loc.ref) {
+      res = irb_->getJObjectTy();
+    } else {
+      res = irb_->getInt32Ty();
+    }
+  }
+  return res;
+}
+
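+// Pick up (or lazily create) the thread-local LLVM context, module, intrinsic
+// helper and IR builder.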
+void MirConverter::InitIR()
+{
+  if (llvm_info_ == NULL) {
+    CompilerTls* tls = cu_->compiler_driver->GetTls();
+    CHECK(tls != NULL);
+    llvm_info_ = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
+    if (llvm_info_ == NULL) {
+      llvm_info_ = new LLVMInfo();
+      tls->SetLLVMInfo(llvm_info_);
+    }
+  }
+  context_ = llvm_info_->GetLLVMContext();
+  module_ = llvm_info_->GetLLVMModule();
+  intrinsic_helper_ = llvm_info_->GetIntrinsicHelper();
+  irb_ = llvm_info_->GetIRBuilder();
+}
+
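+// Translate a Dalvik code address into the LLVM basic block generated for it.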
+::llvm::BasicBlock* MirConverter::FindCaseTarget(uint32_t vaddr)
+{
+  BasicBlock* bb = mir_graph_->FindBlock(vaddr);
+  DCHECK(bb != NULL);
+  return GetLLVMBlock(bb->id);
+}
+
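+// Lower a packed-switch payload into an LLVM switch whose keys are consecutive from first_key.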
+void MirConverter::ConvertPackedSwitch(BasicBlock* bb,
+                                int32_t table_offset, RegLocation rl_src)
+{
+  const Instruction::PackedSwitchPayload* payload =
+      reinterpret_cast<const Instruction::PackedSwitchPayload*>(
+      cu_->insns + current_dalvik_offset_ + table_offset);
+
+  ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
+
+  ::llvm::SwitchInst* sw =
+    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
+                             payload->case_count);
+
+  for (uint16_t i = 0; i < payload->case_count; ++i) {
+    ::llvm::BasicBlock* llvm_bb =
+        FindCaseTarget(current_dalvik_offset_ + payload->targets[i]);
+    sw->addCase(irb_->getInt32(payload->first_key + i), llvm_bb);
+  }
+  ::llvm::MDNode* switch_node =
+      ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
+  sw->setMetadata("SwitchTable", switch_node);
+  bb->taken = NULL;
+  bb->fall_through = NULL;
+}
+
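+// Lower a sparse-switch payload into an LLVM switch using the payload's explicit key/target pairs.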
+void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
+                                int32_t table_offset, RegLocation rl_src)
+{
+  const Instruction::SparseSwitchPayload* payload =
+      reinterpret_cast<const Instruction::SparseSwitchPayload*>(
+      cu_->insns + current_dalvik_offset_ + table_offset);
+
+  const int32_t* keys = payload->GetKeys();
+  const int32_t* targets = payload->GetTargets();
+
+  ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
+
+  ::llvm::SwitchInst* sw =
+    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
+                             payload->case_count);
+
+  for (size_t i = 0; i < payload->case_count; ++i) {
+    ::llvm::BasicBlock* llvm_bb =
+        FindCaseTarget(current_dalvik_offset_ + targets[i]);
+    sw->addCase(irb_->getInt32(keys[i]), llvm_bb);
+  }
+  ::llvm::MDNode* switch_node =
+      ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
+  sw->setMetadata("SwitchTable", switch_node);
+  bb->taken = NULL;
+  bb->fall_through = NULL;
+}
+
+void MirConverter::ConvertSget(int32_t field_index,
+                        art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
+{
+  ::llvm::Constant* field_idx = irb_->getInt32(field_index);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, field_idx);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertSput(int32_t field_index,
+                        art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(field_index));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
+}
+
+void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  id = art::llvm::IntrinsicHelper::HLFillArrayData;
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(offset));
+  args.push_back(GetLLVMValue(rl_array.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
+}
+
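+// Materialize a constant via the matching Const* intrinsic; the flavor (int, long,
+// float, double or object) is chosen from the RegLocation's wide/fp/ref flags.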
+::llvm::Value* MirConverter::EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
+                              RegLocation loc)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  if (loc.wide) {
+    if (loc.fp) {
+      id = art::llvm::IntrinsicHelper::ConstDouble;
+    } else {
+      id = art::llvm::IntrinsicHelper::ConstLong;
+    }
+  } else {
+    if (loc.fp) {
+      id = art::llvm::IntrinsicHelper::ConstFloat;
+    } else if (loc.ref) {
+      id = art::llvm::IntrinsicHelper::ConstObj;
+    } else {
+      id = art::llvm::IntrinsicHelper::ConstInt;
+    }
+  }
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  return irb_->CreateCall(intr, src);
+}
+
+void MirConverter::EmitPopShadowFrame()
+{
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
+      art::llvm::IntrinsicHelper::PopShadowFrame);
+  irb_->CreateCall(intr);
+}
+
+::llvm::Value* MirConverter::EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
+                             RegLocation loc)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  if (loc.wide) {
+    if (loc.fp) {
+      id = art::llvm::IntrinsicHelper::CopyDouble;
+    } else {
+      id = art::llvm::IntrinsicHelper::CopyLong;
+    }
+  } else {
+    if (loc.fp) {
+      id = art::llvm::IntrinsicHelper::CopyFloat;
+    } else if (loc.ref) {
+      id = art::llvm::IntrinsicHelper::CopyObj;
+    } else {
+      id = art::llvm::IntrinsicHelper::CopyInt;
+    }
+  }
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  return irb_->CreateCall(intr, src);
+}
+
+void MirConverter::ConvertMoveException(RegLocation rl_dest)
+{
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
+      art::llvm::IntrinsicHelper::GetException);
+  ::llvm::Value* res = irb_->CreateCall(func);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertThrow(RegLocation rl_src)
+{
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
+      art::llvm::IntrinsicHelper::HLThrowException);
+  irb_->CreateCall(func, src);
+}
+
+void MirConverter::ConvertMonitorEnterExit(int opt_flags,
+                                    art::llvm::IntrinsicHelper::IntrinsicId id,
+                                    RegLocation rl_src)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(func, args);
+}
+
+void MirConverter::ConvertArrayLength(int opt_flags,
+                               RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
+      art::llvm::IntrinsicHelper::OptArrayLength);
+  ::llvm::Value* res = irb_->CreateCall(func, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::EmitSuspendCheck()
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id =
+      art::llvm::IntrinsicHelper::CheckSuspend;
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr);
+}
+
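+// Map a Dalvik condition code onto the corresponding signed LLVM integer comparison.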
+::llvm::Value* MirConverter::ConvertCompare(ConditionCode cc,
+                                   ::llvm::Value* src1, ::llvm::Value* src2)
+{
+  ::llvm::Value* res = NULL;
+  DCHECK_EQ(src1->getType(), src2->getType());
+  switch (cc) {
+    case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break;
+    case kCondNe: res = irb_->CreateICmpNE(src1, src2); break;
+    case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break;
+    case kCondGe: res = irb_->CreateICmpSGE(src1, src2); break;
+    case kCondGt: res = irb_->CreateICmpSGT(src1, src2); break;
+    case kCondLe: res = irb_->CreateICmpSLE(src1, src2); break;
+    default: LOG(FATAL) << "Unexpected cc value " << cc;
+  }
+  return res;
+}
+
+void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
+                                    ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2)
+{
+  if (bb->taken->start_offset <= mir->offset) {
+    EmitSuspendCheck();
+  }
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
+  ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
+  cond_value->setName(StringPrintf("t%d", temp_name_++));
+  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
+                           GetLLVMBlock(bb->fall_through->id));
+  // Don't redo the fallthrough branch in the BB driver
+  bb->fall_through = NULL;
+}
+
+void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
+                                        MIR* mir, ConditionCode cc, RegLocation rl_src1)
+{
+  if (bb->taken->start_offset <= mir->offset) {
+    EmitSuspendCheck();
+  }
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2;
+  if (rl_src1.ref) {
+    src2 = irb_->getJNull();
+  } else {
+    src2 = irb_->getInt32(0);
+  }
+  ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
+  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
+                           GetLLVMBlock(bb->fall_through->id));
+  // Don't redo the fallthrough branch in the BB driver
+  bb->fall_through = NULL;
+}
+
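+// Emit a call to the runtime div/rem intrinsic selected by operand width and operation.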
+::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long,
+                                ::llvm::Value* src1, ::llvm::Value* src2)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  if (is_long) {
+    if (is_div) {
+      id = art::llvm::IntrinsicHelper::DivLong;
+    } else {
+      id = art::llvm::IntrinsicHelper::RemLong;
+    }
+  } else {
+    if (is_div) {
+      id = art::llvm::IntrinsicHelper::DivInt;
+    } else {
+      id = art::llvm::IntrinsicHelper::RemInt;
+    }
+  }
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(src1);
+  args.push_back(src2);
+  return irb_->CreateCall(intr, args);
+}
+
+::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
+                               ::llvm::Value* src1, ::llvm::Value* src2)
+{
+  ::llvm::Value* res = NULL;
+  switch (op) {
+    case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
+    case kOpSub: res = irb_->CreateSub(src1, src2); break;
+    case kOpRsub: res = irb_->CreateSub(src2, src1); break;
+    case kOpMul: res = irb_->CreateMul(src1, src2); break;
+    case kOpOr: res = irb_->CreateOr(src1, src2); break;
+    case kOpAnd: res = irb_->CreateAnd(src1, src2); break;
+    case kOpXor: res = irb_->CreateXor(src1, src2); break;
+    case kOpDiv: res = GenDivModOp(true, is_long, src1, src2); break;
+    case kOpRem: res = GenDivModOp(false, is_long, src1, src2); break;
+    case kOpLsl: res = irb_->CreateShl(src1, src2); break;
+    case kOpLsr: res = irb_->CreateLShr(src1, src2); break;
+    case kOpAsr: res = irb_->CreateAShr(src1, src2); break;
+    default:
+      LOG(FATAL) << "Invalid op " << op;
+  }
+  return res;
+}
+
+void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_src2)
+{
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
+  ::llvm::Value* res = NULL;
+  switch (op) {
+    case kOpAdd: res = irb_->CreateFAdd(src1, src2); break;
+    case kOpSub: res = irb_->CreateFSub(src1, src2); break;
+    case kOpMul: res = irb_->CreateFMul(src1, src2); break;
+    case kOpDiv: res = irb_->CreateFDiv(src1, src2); break;
+    case kOpRem: res = irb_->CreateFRem(src1, src2); break;
+    default:
+      LOG(FATAL) << "Invalid op " << op;
+  }
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
+                         RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(GetLLVMValue(rl_src1.orig_sreg));
+  args.push_back(GetLLVMValue(rl_src2.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
+                            RegLocation rl_dest, RegLocation rl_src, int shift_amount)
+{
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  args.push_back(irb_->getInt32(shift_amount));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
+{
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
+  DCHECK_EQ(src1->getType(), src2->getType());
+  ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest,
+                              RegLocation rl_src1, int32_t imm)
+{
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = irb_->getInt32(imm);
+  ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+/*
+ * Process arguments for invoke.  Note: this code is also used to
+ * collect and process arguments for FILLED_NEW_ARRAY and FILLED_NEW_ARRAY_RANGE.
+ * The requirements are similar.
+ */
+void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir,
+                          InvokeType invoke_type, bool is_range, bool is_filled_new_array)
+{
+  CallInfo* info = mir_graph_->NewMemCallInfo(bb, mir, invoke_type, is_range);
+  ::llvm::SmallVector< ::llvm::Value*, 10> args;
+  // Insert the invoke_type
+  args.push_back(irb_->getInt32(static_cast<int>(invoke_type)));
+  // Insert the method_idx
+  args.push_back(irb_->getInt32(info->index));
+  // Insert the optimization flags
+  args.push_back(irb_->getInt32(info->opt_flags));
+  // Now, insert the actual arguments
+  for (int i = 0; i < info->num_arg_words;) {
+    ::llvm::Value* val = GetLLVMValue(info->args[i].orig_sreg);
+    args.push_back(val);
+    i += info->args[i].wide ? 2 : 1;
+  }
+  /*
+   * Choose the invoke return type based on actual usage.  Note: may
+   * be different than shorty.  For example, if a function return value
+   * is not used, we'll treat this as a void invoke.
+   */
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  if (is_filled_new_array) {
+    id = art::llvm::IntrinsicHelper::HLFilledNewArray;
+  } else if (info->result.location == kLocInvalid) {
+    id = art::llvm::IntrinsicHelper::HLInvokeVoid;
+  } else {
+    if (info->result.wide) {
+      if (info->result.fp) {
+        id = art::llvm::IntrinsicHelper::HLInvokeDouble;
+      } else {
+        id = art::llvm::IntrinsicHelper::HLInvokeLong;
+      }
+    } else if (info->result.ref) {
+      id = art::llvm::IntrinsicHelper::HLInvokeObj;
+    } else if (info->result.fp) {
+      id = art::llvm::IntrinsicHelper::HLInvokeFloat;
+    } else {
+      id = art::llvm::IntrinsicHelper::HLInvokeInt;
+    }
+  }
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  if (info->result.location != kLocInvalid) {
+    DefineValue(res, info->result.orig_sreg);
+  }
+}
+
+void MirConverter::ConvertConstObject(uint32_t idx,
+                               art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
+{
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* index = irb_->getInt32(idx);
+  ::llvm::Value* res = irb_->CreateCall(intr, index);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  id = art::llvm::IntrinsicHelper::HLCheckCast;
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(type_idx));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  irb_->CreateCall(intr, args);
+}
+
+void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  id = art::llvm::IntrinsicHelper::NewInstance;
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* index = irb_->getInt32(type_idx);
+  ::llvm::Value* res = irb_->CreateCall(intr, index);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertNewArray(uint32_t type_idx,
+                            RegLocation rl_dest, RegLocation rl_src)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  id = art::llvm::IntrinsicHelper::NewArray;
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(type_idx));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertAget(int opt_flags,
+                        art::llvm::IntrinsicHelper::IntrinsicId id,
+                        RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 3> args;
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_array.orig_sreg));
+  args.push_back(GetLLVMValue(rl_index.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertAput(int opt_flags,
+                        art::llvm::IntrinsicHelper::IntrinsicId id,
+                        RegLocation rl_src, RegLocation rl_array, RegLocation rl_index)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 4> args;
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  args.push_back(GetLLVMValue(rl_array.orig_sreg));
+  args.push_back(GetLLVMValue(rl_index.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
+}
+
+void MirConverter::ConvertIget(int opt_flags,
+                        art::llvm::IntrinsicHelper::IntrinsicId id,
+                        RegLocation rl_dest, RegLocation rl_obj, int field_index)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 3> args;
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_obj.orig_sreg));
+  args.push_back(irb_->getInt32(field_index));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertIput(int opt_flags,
+                        art::llvm::IntrinsicHelper::IntrinsicId id,
+                        RegLocation rl_src, RegLocation rl_obj, int field_index)
+{
+  ::llvm::SmallVector< ::llvm::Value*, 4> args;
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  args.push_back(GetLLVMValue(rl_obj.orig_sreg));
+  args.push_back(irb_->getInt32(field_index));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
+}
+
+void MirConverter::ConvertInstanceOf(uint32_t type_idx,
+                              RegLocation rl_dest, RegLocation rl_src)
+{
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  id = art::llvm::IntrinsicHelper::InstanceOf;
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(irb_->getInt32(type_idx));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* res = irb_->CreateSExt(GetLLVMValue(rl_src.orig_sreg),
+                                            irb_->getInt64Ty());
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateTrunc(src, irb_->getInt32Ty());
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateFPExt(src, irb_->getDoubleTy());
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateFPTrunc(src, irb_->getFloatTy());
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
+                                         RegLocation rl_dest, RegLocation rl_src1,
+                                         RegLocation rl_src2)
+{
+  DCHECK_EQ(rl_src1.fp, rl_src2.fp);
+  DCHECK_EQ(rl_src1.wide, rl_src2.wide);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::SmallVector< ::llvm::Value*, 2> args;
+  args.push_back(GetLLVMValue(rl_src1.orig_sreg));
+  args.push_back(GetLLVMValue(rl_src2.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
+                                art::llvm::IntrinsicHelper::IntrinsicId id)
+{
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res =
+      irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* res = irb_->CreateNeg(GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest,
+                           RegLocation rl_src)
+{
+  ::llvm::Value* res =
+      irb_->CreateSIToFP(GetLLVMValue(rl_src.orig_sreg), ty);
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
+                           RegLocation rl_dest,
+                    RegLocation rl_src)
+{
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+
+void MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* res =
+      irb_->CreateFNeg(GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src)
+{
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateXor(src, static_cast<uint64_t>(-1));
+  DefineValue(res, rl_dest.orig_sreg);
+}
+
+void MirConverter::EmitConstructorBarrier() {
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
+      art::llvm::IntrinsicHelper::ConstructorBarrier);
+  irb_->CreateCall(intr);
+}
+
+/*
+ * Target-independent code generation.  Use only high-level
+ * load/store utilities here, or target-dependent genXX() handlers
+ * when necessary.
+ */
+bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
+                           ::llvm::BasicBlock* llvm_bb)
+{
+  bool res = false;   // Assume success (set to true on failure).
+  RegLocation rl_src[3];
+  RegLocation rl_dest = mir_graph_->GetBadLoc();
+  Instruction::Code opcode = mir->dalvikInsn.opcode;
+  int op_val = opcode;
+  uint32_t vB = mir->dalvikInsn.vB;
+  uint32_t vC = mir->dalvikInsn.vC;
+  int opt_flags = mir->optimization_flags;
+
+  if (cu_->verbose) {
+    if (op_val < kMirOpFirst) {
+      LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
+    } else {
+      LOG(INFO) << mir_graph_->extended_mir_op_names_[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
+    }
+  }
+
+  /* Prep Src and Dest locations */
+  int next_sreg = 0;
+  int next_loc = 0;
+  int attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
+  if (attrs & DF_UA) {
+    if (attrs & DF_A_WIDE) {
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
+      next_sreg += 2;
+    } else {
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
+      next_sreg++;
+    }
+  }
+  if (attrs & DF_UB) {
+    if (attrs & DF_B_WIDE) {
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
+      next_sreg += 2;
+    } else {
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
+      next_sreg++;
+    }
+  }
+  if (attrs & DF_UC) {
+    if (attrs & DF_C_WIDE) {
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
+    } else {
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
+    }
+  }
+  if (attrs & DF_DA) {
+    if (attrs & DF_A_WIDE) {
+      rl_dest = mir_graph_->GetDestWide(mir);
+    } else {
+      rl_dest = mir_graph_->GetDest(mir);
+    }
+  }
+
+  switch (opcode) {
+    case Instruction::NOP:
+      break;
+
+    case Instruction::MOVE:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_OBJECT_16:
+    case Instruction::MOVE_OBJECT_FROM16:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_WIDE:
+    case Instruction::MOVE_WIDE_16:
+    case Instruction::MOVE_WIDE_FROM16: {
+        /*
+         * Moves/copies are meaningless in pure SSA register form,
+         * but we need to preserve them for the conversion back into
+         * MIR (at least until we stop using the Dalvik register maps).
+         * Insert a dummy intrinsic copy call, which will be recognized
+         * by the quick path and removed by the portable path.
+         */
+        ::llvm::Value* src = GetLLVMValue(rl_src[0].orig_sreg);
+        ::llvm::Value* res = EmitCopy(src, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
+      }
+      break;
+
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16: {
+        ::llvm::Constant* imm_value = irb_->getJInt(vB);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
+      }
+      break;
+
+    case Instruction::CONST_WIDE_16:
+    case Instruction::CONST_WIDE_32: {
+        // Sign extend to 64 bits
+        int64_t imm = static_cast<int32_t>(vB);
+        ::llvm::Constant* imm_value = irb_->getJLong(imm);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
+      }
+      break;
+
+    case Instruction::CONST_HIGH16: {
+        ::llvm::Constant* imm_value = irb_->getJInt(vB << 16);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
+      }
+      break;
+
+    case Instruction::CONST_WIDE: {
+        ::llvm::Constant* imm_value =
+            irb_->getJLong(mir->dalvikInsn.vB_wide);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
+      }
+      break;
+    case Instruction::CONST_WIDE_HIGH16: {
+        int64_t imm = static_cast<int64_t>(vB) << 48;
+        ::llvm::Constant* imm_value = irb_->getJLong(imm);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
+      }
+      break;
+
+    case Instruction::SPUT_OBJECT:
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputObject,
+                  rl_src[0]);
+      break;
+    case Instruction::SPUT:
+      if (rl_src[0].fp) {
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputFloat,
+                    rl_src[0]);
+      } else {
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSput, rl_src[0]);
+      }
+      break;
+    case Instruction::SPUT_BOOLEAN:
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputBoolean,
+                  rl_src[0]);
+      break;
+    case Instruction::SPUT_BYTE:
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputByte, rl_src[0]);
+      break;
+    case Instruction::SPUT_CHAR:
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputChar, rl_src[0]);
+      break;
+    case Instruction::SPUT_SHORT:
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputShort, rl_src[0]);
+      break;
+    case Instruction::SPUT_WIDE:
+      if (rl_src[0].fp) {
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputDouble,
+                    rl_src[0]);
+      } else {
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputWide,
+                    rl_src[0]);
+      }
+      break;
+
+    case Instruction::SGET_OBJECT:
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetObject, rl_dest);
+      break;
+    case Instruction::SGET:
+      if (rl_dest.fp) {
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetFloat, rl_dest);
+      } else {
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSget, rl_dest);
+      }
+      break;
+    case Instruction::SGET_BOOLEAN:
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetBoolean, rl_dest);
+      break;
+    case Instruction::SGET_BYTE:
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetByte, rl_dest);
+      break;
+    case Instruction::SGET_CHAR:
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetChar, rl_dest);
+      break;
+    case Instruction::SGET_SHORT:
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetShort, rl_dest);
+      break;
+    case Instruction::SGET_WIDE:
+      if (rl_dest.fp) {
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetDouble,
+                    rl_dest);
+      } else {
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetWide, rl_dest);
+      }
+      break;
+
+    case Instruction::RETURN_WIDE:
+    case Instruction::RETURN:
+    case Instruction::RETURN_OBJECT: {
+        if (!mir_graph_->MethodIsLeaf()) {
+          EmitSuspendCheck();
+        }
+        EmitPopShadowFrame();
+        irb_->CreateRet(GetLLVMValue(rl_src[0].orig_sreg));
+        DCHECK(bb->terminated_by_return);
+      }
+      break;
+
+    case Instruction::RETURN_VOID: {
+        if (((cu_->access_flags & kAccConstructor) != 0) &&
+            cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(),
+                                                            cu_->dex_file,
+                                                            cu_->class_def_idx)) {
+          EmitConstructorBarrier();
+        }
+        if (!mir_graph_->MethodIsLeaf()) {
+          EmitSuspendCheck();
+        }
+        EmitPopShadowFrame();
+        irb_->CreateRetVoid();
+        DCHECK(bb->terminated_by_return);
+      }
+      break;
+
+    case Instruction::IF_EQ:
+      ConvertCompareAndBranch(bb, mir, kCondEq, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::IF_NE:
+      ConvertCompareAndBranch(bb, mir, kCondNe, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::IF_LT:
+      ConvertCompareAndBranch(bb, mir, kCondLt, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::IF_GE:
+      ConvertCompareAndBranch(bb, mir, kCondGe, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::IF_GT:
+      ConvertCompareAndBranch(bb, mir, kCondGt, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::IF_LE:
+      ConvertCompareAndBranch(bb, mir, kCondLe, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::IF_EQZ:
+      ConvertCompareZeroAndBranch(bb, mir, kCondEq, rl_src[0]);
+      break;
+    case Instruction::IF_NEZ:
+      ConvertCompareZeroAndBranch(bb, mir, kCondNe, rl_src[0]);
+      break;
+    case Instruction::IF_LTZ:
+      ConvertCompareZeroAndBranch(bb, mir, kCondLt, rl_src[0]);
+      break;
+    case Instruction::IF_GEZ:
+      ConvertCompareZeroAndBranch(bb, mir, kCondGe, rl_src[0]);
+      break;
+    case Instruction::IF_GTZ:
+      ConvertCompareZeroAndBranch(bb, mir, kCondGt, rl_src[0]);
+      break;
+    case Instruction::IF_LEZ:
+      ConvertCompareZeroAndBranch(bb, mir, kCondLe, rl_src[0]);
+      break;
+
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32: {
+        if (bb->taken->start_offset <= bb->start_offset) {
+          EmitSuspendCheck();
+        }
+        irb_->CreateBr(GetLLVMBlock(bb->taken->id));
+      }
+      break;
+
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+    case Instruction::ADD_INT:
+    case Instruction::ADD_INT_2ADDR:
+      ConvertArithOp(kOpAdd, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+    case Instruction::SUB_INT:
+    case Instruction::SUB_INT_2ADDR:
+      ConvertArithOp(kOpSub, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+    case Instruction::MUL_INT:
+    case Instruction::MUL_INT_2ADDR:
+      ConvertArithOp(kOpMul, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+    case Instruction::DIV_INT:
+    case Instruction::DIV_INT_2ADDR:
+      ConvertArithOp(kOpDiv, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+    case Instruction::REM_INT:
+    case Instruction::REM_INT_2ADDR:
+      ConvertArithOp(kOpRem, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::AND_LONG:
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::AND_INT:
+    case Instruction::AND_INT_2ADDR:
+      ConvertArithOp(kOpAnd, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+    case Instruction::OR_INT:
+    case Instruction::OR_INT_2ADDR:
+      ConvertArithOp(kOpOr, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+    case Instruction::XOR_INT:
+    case Instruction::XOR_INT_2ADDR:
+      ConvertArithOp(kOpXor, rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::SHL_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+      ConvertShift(art::llvm::IntrinsicHelper::SHLLong,
+                    rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::SHL_INT:
+    case Instruction::SHL_INT_2ADDR:
+      ConvertShift(art::llvm::IntrinsicHelper::SHLInt,
+                   rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::SHR_LONG:
+    case Instruction::SHR_LONG_2ADDR:
+      ConvertShift(art::llvm::IntrinsicHelper::SHRLong,
+                   rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::SHR_INT:
+    case Instruction::SHR_INT_2ADDR:
+      ConvertShift(art::llvm::IntrinsicHelper::SHRInt,
+                   rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::USHR_LONG:
+    case Instruction::USHR_LONG_2ADDR:
+      ConvertShift(art::llvm::IntrinsicHelper::USHRLong,
+                   rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::USHR_INT:
+    case Instruction::USHR_INT_2ADDR:
+      ConvertShift(art::llvm::IntrinsicHelper::USHRInt,
+                   rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::ADD_INT_LIT16:
+    case Instruction::ADD_INT_LIT8:
+      ConvertArithOpLit(kOpAdd, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::RSUB_INT:
+    case Instruction::RSUB_INT_LIT8:
+      ConvertArithOpLit(kOpRsub, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::MUL_INT_LIT16:
+    case Instruction::MUL_INT_LIT8:
+      ConvertArithOpLit(kOpMul, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::DIV_INT_LIT16:
+    case Instruction::DIV_INT_LIT8:
+      ConvertArithOpLit(kOpDiv, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::REM_INT_LIT16:
+    case Instruction::REM_INT_LIT8:
+      ConvertArithOpLit(kOpRem, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::AND_INT_LIT16:
+    case Instruction::AND_INT_LIT8:
+      ConvertArithOpLit(kOpAnd, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::OR_INT_LIT16:
+    case Instruction::OR_INT_LIT8:
+      ConvertArithOpLit(kOpOr, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::XOR_INT_LIT16:
+    case Instruction::XOR_INT_LIT8:
+      ConvertArithOpLit(kOpXor, rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::SHL_INT_LIT8:
+      ConvertShiftLit(art::llvm::IntrinsicHelper::SHLInt,
+                      rl_dest, rl_src[0], vC & 0x1f);
+      break;
+    case Instruction::SHR_INT_LIT8:
+      ConvertShiftLit(art::llvm::IntrinsicHelper::SHRInt,
+                      rl_dest, rl_src[0], vC & 0x1f);
+      break;
+    case Instruction::USHR_INT_LIT8:
+      ConvertShiftLit(art::llvm::IntrinsicHelper::USHRInt,
+                      rl_dest, rl_src[0], vC & 0x1f);
+      break;
+
+    case Instruction::ADD_FLOAT:
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::ADD_DOUBLE:
+    case Instruction::ADD_DOUBLE_2ADDR:
+      ConvertFPArithOp(kOpAdd, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::SUB_FLOAT:
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::SUB_DOUBLE:
+    case Instruction::SUB_DOUBLE_2ADDR:
+      ConvertFPArithOp(kOpSub, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::MUL_FLOAT:
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::MUL_DOUBLE:
+    case Instruction::MUL_DOUBLE_2ADDR:
+      ConvertFPArithOp(kOpMul, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::DIV_FLOAT:
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::DIV_DOUBLE:
+    case Instruction::DIV_DOUBLE_2ADDR:
+      ConvertFPArithOp(kOpDiv, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::REM_FLOAT:
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::REM_DOUBLE:
+    case Instruction::REM_DOUBLE_2ADDR:
+      ConvertFPArithOp(kOpRem, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::INVOKE_STATIC:
+      ConvertInvoke(bb, mir, kStatic, false /*range*/,
+                    false /* NewFilledArray */);
+      break;
+    case Instruction::INVOKE_STATIC_RANGE:
+      ConvertInvoke(bb, mir, kStatic, true /*range*/,
+                    false /* NewFilledArray */);
+      break;
+
+    case Instruction::INVOKE_DIRECT:
+      ConvertInvoke(bb,  mir, kDirect, false /*range*/,
+                    false /* NewFilledArray */);
+      break;
+    case Instruction::INVOKE_DIRECT_RANGE:
+      ConvertInvoke(bb, mir, kDirect, true /*range*/,
+                    false /* NewFilledArray */);
+      break;
+
+    case Instruction::INVOKE_VIRTUAL:
+      ConvertInvoke(bb, mir, kVirtual, false /*range*/,
+                    false /* NewFilledArray */);
+      break;
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      ConvertInvoke(bb, mir, kVirtual, true /*range*/,
+                    false /* NewFilledArray */);
+      break;
+
+    case Instruction::INVOKE_SUPER:
+      ConvertInvoke(bb, mir, kSuper, false /*range*/,
+                    false /* NewFilledArray */);
+      break;
+    case Instruction::INVOKE_SUPER_RANGE:
+      ConvertInvoke(bb, mir, kSuper, true /*range*/,
+                    false /* NewFilledArray */);
+      break;
+
+    case Instruction::INVOKE_INTERFACE:
+      ConvertInvoke(bb, mir, kInterface, false /*range*/,
+                    false /* NewFilledArray */);
+      break;
+    case Instruction::INVOKE_INTERFACE_RANGE:
+      ConvertInvoke(bb, mir, kInterface, true /*range*/,
+                    false /* NewFilledArray */);
+      break;
+    case Instruction::FILLED_NEW_ARRAY:
+      ConvertInvoke(bb, mir, kInterface, false /*range*/,
+                    true /* NewFilledArray */);
+      break;
+    case Instruction::FILLED_NEW_ARRAY_RANGE:
+      ConvertInvoke(bb, mir, kInterface, true /*range*/,
+                    true /* NewFilledArray */);
+      break;
+
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO:
+      ConvertConstObject(vB, art::llvm::IntrinsicHelper::ConstString,
+                         rl_dest);
+      break;
+
+    case Instruction::CONST_CLASS:
+      ConvertConstObject(vB, art::llvm::IntrinsicHelper::ConstClass,
+                         rl_dest);
+      break;
+
+    case Instruction::CHECK_CAST:
+      ConvertCheckCast(vB, rl_src[0]);
+      break;
+
+    case Instruction::NEW_INSTANCE:
+      ConvertNewInstance(vB, rl_dest);
+      break;
+
+   case Instruction::MOVE_EXCEPTION:
+      ConvertMoveException(rl_dest);
+      break;
+
+   case Instruction::THROW:
+      ConvertThrow(rl_src[0]);
+      /*
+       * If this throw is standalone, terminate.
+       * If it might rethrow, force termination
+       * of the following block.
+       */
+      if (bb->fall_through == NULL) {
+        irb_->CreateUnreachable();
+      } else {
+        bb->fall_through->fall_through = NULL;
+        bb->fall_through->taken = NULL;
+      }
+      break;
+
+    case Instruction::MOVE_RESULT_WIDE:
+    case Instruction::MOVE_RESULT:
+    case Instruction::MOVE_RESULT_OBJECT:
+      /*
+       * All move_results should have been folded into the preceding invoke.
+       */
+      LOG(FATAL) << "Unexpected move_result";
+      break;
+
+    case Instruction::MONITOR_ENTER:
+      ConvertMonitorEnterExit(opt_flags,
+                              art::llvm::IntrinsicHelper::MonitorEnter,
+                              rl_src[0]);
+      break;
+
+    case Instruction::MONITOR_EXIT:
+      ConvertMonitorEnterExit(opt_flags,
+                              art::llvm::IntrinsicHelper::MonitorExit,
+                              rl_src[0]);
+      break;
+
+    case Instruction::ARRAY_LENGTH:
+      ConvertArrayLength(opt_flags, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::NEW_ARRAY:
+      ConvertNewArray(vC, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::INSTANCE_OF:
+      ConvertInstanceOf(vC, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::AGET:
+      if (rl_dest.fp) {
+        ConvertAget(opt_flags,
+                    art::llvm::IntrinsicHelper::HLArrayGetFloat,
+                    rl_dest, rl_src[0], rl_src[1]);
+      } else {
+        ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGet,
+                    rl_dest, rl_src[0], rl_src[1]);
+      }
+      break;
+    case Instruction::AGET_OBJECT:
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetObject,
+                  rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::AGET_BOOLEAN:
+      ConvertAget(opt_flags,
+                  art::llvm::IntrinsicHelper::HLArrayGetBoolean,
+                  rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::AGET_BYTE:
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetByte,
+                  rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::AGET_CHAR:
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetChar,
+                  rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::AGET_SHORT:
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetShort,
+                  rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::AGET_WIDE:
+      if (rl_dest.fp) {
+        ConvertAget(opt_flags,
+                    art::llvm::IntrinsicHelper::HLArrayGetDouble,
+                    rl_dest, rl_src[0], rl_src[1]);
+      } else {
+        ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetWide,
+                    rl_dest, rl_src[0], rl_src[1]);
+      }
+      break;
+
+    case Instruction::APUT:
+      if (rl_src[0].fp) {
+        ConvertAput(opt_flags,
+                    art::llvm::IntrinsicHelper::HLArrayPutFloat,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      } else {
+        ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPut,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      }
+      break;
+    case Instruction::APUT_OBJECT:
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutObject,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      break;
+    case Instruction::APUT_BOOLEAN:
+      ConvertAput(opt_flags,
+                  art::llvm::IntrinsicHelper::HLArrayPutBoolean,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      break;
+    case Instruction::APUT_BYTE:
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutByte,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      break;
+    case Instruction::APUT_CHAR:
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutChar,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      break;
+    case Instruction::APUT_SHORT:
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutShort,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      break;
+    case Instruction::APUT_WIDE:
+      if (rl_src[0].fp) {
+        ConvertAput(opt_flags,
+                    art::llvm::IntrinsicHelper::HLArrayPutDouble,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      } else {
+        ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutWide,
+                    rl_src[0], rl_src[1], rl_src[2]);
+      }
+      break;
+
+    case Instruction::IGET:
+      if (rl_dest.fp) {
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetFloat,
+                    rl_dest, rl_src[0], vC);
+      } else {
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGet,
+                    rl_dest, rl_src[0], vC);
+      }
+      break;
+    case Instruction::IGET_OBJECT:
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetObject,
+                  rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::IGET_BOOLEAN:
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetBoolean,
+                  rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::IGET_BYTE:
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetByte,
+                  rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::IGET_CHAR:
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetChar,
+                  rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::IGET_SHORT:
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetShort,
+                  rl_dest, rl_src[0], vC);
+      break;
+    case Instruction::IGET_WIDE:
+      if (rl_dest.fp) {
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetDouble,
+                    rl_dest, rl_src[0], vC);
+      } else {
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetWide,
+                    rl_dest, rl_src[0], vC);
+      }
+      break;
+    case Instruction::IPUT:
+      if (rl_src[0].fp) {
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutFloat,
+                    rl_src[0], rl_src[1], vC);
+      } else {
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPut,
+                    rl_src[0], rl_src[1], vC);
+      }
+      break;
+    case Instruction::IPUT_OBJECT:
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutObject,
+                  rl_src[0], rl_src[1], vC);
+      break;
+    case Instruction::IPUT_BOOLEAN:
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutBoolean,
+                  rl_src[0], rl_src[1], vC);
+      break;
+    case Instruction::IPUT_BYTE:
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutByte,
+                  rl_src[0], rl_src[1], vC);
+      break;
+    case Instruction::IPUT_CHAR:
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutChar,
+                  rl_src[0], rl_src[1], vC);
+      break;
+    case Instruction::IPUT_SHORT:
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutShort,
+                  rl_src[0], rl_src[1], vC);
+      break;
+    case Instruction::IPUT_WIDE:
+      if (rl_src[0].fp) {
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutDouble,
+                    rl_src[0], rl_src[1], vC);
+      } else {
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutWide,
+                    rl_src[0], rl_src[1], vC);
+      }
+      break;
+
+    case Instruction::FILL_ARRAY_DATA:
+      ConvertFillArrayData(vB, rl_src[0]);
+      break;
+
+    case Instruction::LONG_TO_INT:
+      ConvertLongToInt(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::INT_TO_LONG:
+      ConvertIntToLong(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::INT_TO_CHAR:
+      ConvertIntNarrowing(rl_dest, rl_src[0],
+                          art::llvm::IntrinsicHelper::IntToChar);
+      break;
+    case Instruction::INT_TO_BYTE:
+      ConvertIntNarrowing(rl_dest, rl_src[0],
+                          art::llvm::IntrinsicHelper::IntToByte);
+      break;
+    case Instruction::INT_TO_SHORT:
+      ConvertIntNarrowing(rl_dest, rl_src[0],
+                          art::llvm::IntrinsicHelper::IntToShort);
+      break;
+
+    case Instruction::INT_TO_FLOAT:
+    case Instruction::LONG_TO_FLOAT:
+      ConvertIntToFP(irb_->getFloatTy(), rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::INT_TO_DOUBLE:
+    case Instruction::LONG_TO_DOUBLE:
+      ConvertIntToFP(irb_->getDoubleTy(), rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::FLOAT_TO_DOUBLE:
+      ConvertFloatToDouble(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::DOUBLE_TO_FLOAT:
+      ConvertDoubleToFloat(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::NEG_LONG:
+    case Instruction::NEG_INT:
+      ConvertNeg(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::NEG_FLOAT:
+    case Instruction::NEG_DOUBLE:
+      ConvertNegFP(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::NOT_LONG:
+    case Instruction::NOT_INT:
+      ConvertNot(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::FLOAT_TO_INT:
+      ConvertFPToInt(art::llvm::IntrinsicHelper::F2I, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::DOUBLE_TO_INT:
+      ConvertFPToInt(art::llvm::IntrinsicHelper::D2I, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::FLOAT_TO_LONG:
+      ConvertFPToInt(art::llvm::IntrinsicHelper::F2L, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::DOUBLE_TO_LONG:
+      ConvertFPToInt(art::llvm::IntrinsicHelper::D2L, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::CMPL_FLOAT:
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmplFloat,
+                            rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::CMPG_FLOAT:
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmpgFloat,
+                            rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::CMPL_DOUBLE:
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmplDouble,
+                            rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::CMPG_DOUBLE:
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmpgDouble,
+                            rl_dest, rl_src[0], rl_src[1]);
+      break;
+    case Instruction::CMP_LONG:
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmpLong,
+                            rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::PACKED_SWITCH:
+      ConvertPackedSwitch(bb, vB, rl_src[0]);
+      break;
+
+    case Instruction::SPARSE_SWITCH:
+      ConvertSparseSwitch(bb, vB, rl_src[0]);
+      break;
+
+    default:
+      UNIMPLEMENTED(FATAL) << "Unsupported Dex opcode 0x" << std::hex << opcode;
+      res = true;
+  }
+  return res;
+}
+
+void MirConverter::SetDexOffset(int32_t offset)
+{
+  current_dalvik_offset_ = offset;
+  ::llvm::SmallVector< ::llvm::Value*, 1> array_ref;
+  array_ref.push_back(irb_->getInt32(offset));
+  ::llvm::MDNode* node = ::llvm::MDNode::get(*context_, array_ref);
+  irb_->SetDexOffset(node);
+}
+
+// Attach method info as metadata to special intrinsic
+void MirConverter::SetMethodInfo()
+{
+  // We don't want dex offset on this
+  irb_->SetDexOffset(NULL);
+  art::llvm::IntrinsicHelper::IntrinsicId id;
+  id = art::llvm::IntrinsicHelper::MethodInfo;
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Instruction* inst = irb_->CreateCall(intr);
+  ::llvm::SmallVector< ::llvm::Value*, 2> reg_info;
+  reg_info.push_back(irb_->getInt32(cu_->num_ins));
+  reg_info.push_back(irb_->getInt32(cu_->num_regs));
+  reg_info.push_back(irb_->getInt32(cu_->num_outs));
+  reg_info.push_back(irb_->getInt32(cu_->num_compiler_temps));
+  reg_info.push_back(irb_->getInt32(mir_graph_->GetNumSSARegs()));
+  ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*context_, reg_info);
+  inst->setMetadata("RegInfo", reg_info_node);
+  SetDexOffset(current_dalvik_offset_);
+}
+
+void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb)
+{
+  SetDexOffset(bb->start_offset);
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    int opcode = mir->dalvikInsn.opcode;
+    if (opcode < kMirOpFirst) {
+      // Not a pseudo op, so there is no Phi node here; keep scanning.
+      continue;
+    }
+    if (opcode != kMirOpPhi) {
+      // Skip other mir Pseudos.
+      continue;
+    }
+    RegLocation rl_dest = mir_graph_->reg_location_[mir->ssa_rep->defs[0]];
+    /*
+     * The Art compiler's Phi nodes only handle 32-bit operands,
+     * representing wide values using a matched set of Phi nodes
+     * for the lower and upper halves.  In the llvm world, we only
+     * want a single Phi for wides.  Here we will simply discard
+     * the Phi node representing the high word.
+     */
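+    // For example, a long held in a Dalvik register pair has matched Phis for
+    // its low and high words; only the low-word Phi becomes an LLVM PHI below.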
+    if (rl_dest.high_word) {
+      continue;  // No Phi node - handled via low word
+    }
+    int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
+    ::llvm::Type* phi_type =
+        LlvmTypeFromLocRec(rl_dest);
+    ::llvm::PHINode* phi = irb_->CreatePHI(phi_type, mir->ssa_rep->num_uses);
+    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+      RegLocation loc;
+      // Don't check width here.
+      loc = mir_graph_->GetRawSrc(mir, i);
+      DCHECK_EQ(rl_dest.wide, loc.wide);
+      DCHECK_EQ(rl_dest.wide & rl_dest.high_word, loc.wide & loc.high_word);
+      DCHECK_EQ(rl_dest.fp, loc.fp);
+      DCHECK_EQ(rl_dest.core, loc.core);
+      DCHECK_EQ(rl_dest.ref, loc.ref);
+      SafeMap<unsigned int, unsigned int>::iterator it;
+      it = mir_graph_->block_id_map_.find(incoming[i]);
+      DCHECK(it != mir_graph_->block_id_map_.end());
+      DCHECK(GetLLVMValue(loc.orig_sreg) != NULL);
+      DCHECK(GetLLVMBlock(it->second) != NULL);
+      phi->addIncoming(GetLLVMValue(loc.orig_sreg),
+                       GetLLVMBlock(it->second));
+    }
+    DefineValueOnly(phi, rl_dest.orig_sreg);
+  }
+}
+
+/* Extended MIR instructions like PHI */
+void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
+                               ::llvm::BasicBlock* llvm_bb)
+{
+  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
+    case kMirOpPhi: {
+      // The llvm Phi node already emitted - just DefineValue() here.
+      RegLocation rl_dest = mir_graph_->reg_location_[mir->ssa_rep->defs[0]];
+      if (!rl_dest.high_word) {
+        // Only consider low word of pairs.
+        DCHECK(GetLLVMValue(rl_dest.orig_sreg) != NULL);
+        ::llvm::Value* phi = GetLLVMValue(rl_dest.orig_sreg);
+        SetVregOnValue(phi, rl_dest.orig_sreg);
+      }
+      break;
+    }
+    case kMirOpCopy: {
+      UNIMPLEMENTED(WARNING) << "unimp kMirOpCopy";
+      break;
+    }
+    case kMirOpNop:
+      if ((mir == bb->last_mir_insn) && (bb->taken == NULL) &&
+          (bb->fall_through == NULL)) {
+        irb_->CreateUnreachable();
+      }
+      break;
+
+    // TODO: need GBC intrinsic to take advantage of fused operations
+    case kMirOpFusedCmplFloat:
+      UNIMPLEMENTED(FATAL) << "kMirOpFusedCmplFloat unsupported";
+      break;
+    case kMirOpFusedCmpgFloat:
+      UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpgFloat unsupported";
+      break;
+    case kMirOpFusedCmplDouble:
+      UNIMPLEMENTED(FATAL) << "kMirOpFusedCmplDouble unsupported";
+      break;
+    case kMirOpFusedCmpgDouble:
+      UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpgDouble unsupported";
+      break;
+    case kMirOpFusedCmpLong:
+      UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpLong unsupported";
+      break;
+    default:
+      break;
+  }
+}
+
+/* Handle the content in each basic block */
+bool MirConverter::BlockBitcodeConversion(BasicBlock* bb)
+{
+  if (bb->block_type == kDead) return false;
+  ::llvm::BasicBlock* llvm_bb = GetLLVMBlock(bb->id);
+  if (llvm_bb == NULL) {
+    CHECK(bb->block_type == kExitBlock);
+  } else {
+    irb_->SetInsertPoint(llvm_bb);
+    SetDexOffset(bb->start_offset);
+  }
+
+  if (cu_->verbose) {
+    LOG(INFO) << "................................";
+    LOG(INFO) << "Block id " << bb->id;
+    if (llvm_bb != NULL) {
+      LOG(INFO) << "label " << llvm_bb->getName().str().c_str();
+    } else {
+      LOG(INFO) << "llvm_bb is NULL";
+    }
+  }
+
+  if (bb->block_type == kEntryBlock) {
+    SetMethodInfo();
+
+    { // Allocate shadowframe.
+      art::llvm::IntrinsicHelper::IntrinsicId id =
+              art::llvm::IntrinsicHelper::AllocaShadowFrame;
+      ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
+      ::llvm::Value* entries = irb_->getInt32(cu_->num_dalvik_registers);
+      irb_->CreateCall(func, entries);
+    }
+
+    { // Store arguments to vregs.
+      uint16_t arg_reg = cu_->num_regs;
+
+      ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
+      ::llvm::Function::arg_iterator arg_end(func_->arg_end());
+
+      const char* shorty = cu_->shorty;
+      uint32_t shorty_size = strlen(shorty);
+      CHECK_GE(shorty_size, 1u);
+
+      ++arg_iter; // skip method object
+
+      if ((cu_->access_flags & kAccStatic) == 0) {
+        SetVregOnValue(arg_iter, arg_reg);
+        ++arg_iter;
+        ++arg_reg;
+      }
+
+      for (uint32_t i = 1; i < shorty_size; ++i, ++arg_iter) {
+        SetVregOnValue(arg_iter, arg_reg);
+
+        ++arg_reg;
+        if (shorty[i] == 'J' || shorty[i] == 'D') {
+          // Wide types, such as long and double, use a pair of registers
+          // to store the value, so we have to increase arg_reg again.
+          ++arg_reg;
+        }
+      }
+    }
+  } else if (bb->block_type == kExitBlock) {
+    /*
+     * Because of the differences between how MIR/LIR and llvm handle exit
+     * blocks, we won't explicitly convert them.  On the llvm-to-lir
+     * path, the exit block will need to be regenerated.
+     */
+    return false;
+  } else if (bb->block_type == kExceptionHandling) {
+    /*
+     * Because we're deferring null checking, delete the associated empty
+     * exception block.
+     */
+    llvm_bb->eraseFromParent();
+    return false;
+  }
+
+  HandlePhiNodes(bb, llvm_bb);
+
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+
+    SetDexOffset(mir->offset);
+
+    int opcode = mir->dalvikInsn.opcode;
+    Instruction::Format dalvik_format =
+        Instruction::FormatOf(mir->dalvikInsn.opcode);
+
+    if (opcode == kMirOpCheck) {
+      // Combine check and work halves of throwing instruction.
+      MIR* work_half = mir->meta.throw_insn;
+      mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
+      opcode = mir->dalvikInsn.opcode;
+      SSARepresentation* ssa_rep = work_half->ssa_rep;
+      work_half->ssa_rep = mir->ssa_rep;
+      mir->ssa_rep = ssa_rep;
+      work_half->meta.original_opcode = work_half->dalvikInsn.opcode;
+      work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
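+      // The check half now carries the work half's opcode and ssa_rep, while
+      // the original work half is turned into a nop so it isn't converted twice.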
+      if (bb->successor_block_list.block_list_type == kCatch) {
+        ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
+            art::llvm::IntrinsicHelper::CatchTargets);
+        ::llvm::Value* switch_key =
+            irb_->CreateCall(intr, irb_->getInt32(mir->offset));
+        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
+        // New basic block to use for work half
+        ::llvm::BasicBlock* work_bb =
+            ::llvm::BasicBlock::Create(*context_, "", func_);
+        ::llvm::SwitchInst* sw =
+            irb_->CreateSwitch(switch_key, work_bb,
+                                     bb->successor_block_list.blocks->Size());
+        while (true) {
+          SuccessorBlockInfo *successor_block_info = iter.Next();
+          if (successor_block_info == NULL) break;
+          ::llvm::BasicBlock *target =
+              GetLLVMBlock(successor_block_info->block->id);
+          int type_index = successor_block_info->key;
+          sw->addCase(irb_->getInt32(type_index), target);
+        }
+        llvm_bb = work_bb;
+        irb_->SetInsertPoint(llvm_bb);
+      }
+    }
+
+    if (opcode >= kMirOpFirst) {
+      ConvertExtendedMIR(bb, mir, llvm_bb);
+      continue;
+    }
+
+    bool not_handled = ConvertMIRNode(mir, bb, llvm_bb);
+    if (not_handled) {
+      Instruction::Code dalvik_opcode = static_cast<Instruction::Code>(opcode);
+      LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
+                                   mir->offset, opcode,
+                                   Instruction::Name(dalvik_opcode),
+                                   dalvik_format);
+    }
+  }
+
+  if (bb->block_type == kEntryBlock) {
+    entry_target_bb_ = GetLLVMBlock(bb->fall_through->id);
+  } else if ((bb->fall_through != NULL) && !bb->terminated_by_return) {
+    irb_->CreateBr(GetLLVMBlock(bb->fall_through->id));
+  }
+
+  return false;
+}
+
+char RemapShorty(char shorty_type) {
+  /*
+   * TODO: might want to revisit this.  Dalvik registers are 32-bits wide,
+   * and longs/doubles are represented as a pair of registers.  When sub-word
+   * arguments (and method results) are passed, they are extended to Dalvik
+   * virtual register containers.  Because llvm is picky about type consistency,
+   * we must either insert casts between the "real" types and their 32-bit
+   * container types wherever the two mix, or always use the expanded values.
+   * Here, we're doing the latter.  We map the shorty signature to container
+   * types (which is valid so long as we always do a real expansion of passed
+   * arguments and field loads).
+   */
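+  // For example, applying this to each character of the shorty "ZLC"
+  // (boolean return, Object and char arguments) yields "ILI".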
+  switch (shorty_type) {
+    case 'Z' : shorty_type = 'I'; break;
+    case 'B' : shorty_type = 'I'; break;
+    case 'S' : shorty_type = 'I'; break;
+    case 'C' : shorty_type = 'I'; break;
+    default: break;
+  }
+  return shorty_type;
+}
+
+::llvm::FunctionType* MirConverter::GetFunctionType() {
+
+  // Get return type
+  ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0]));
+
+  // Get argument type
+  std::vector< ::llvm::Type*> args_type;
+
+  // method object
+  args_type.push_back(irb_->getJMethodTy());
+
+  // Do we have a "this"?
+  if ((cu_->access_flags & kAccStatic) == 0) {
+    args_type.push_back(irb_->getJObjectTy());
+  }
+
+  for (uint32_t i = 1; i < strlen(cu_->shorty); ++i) {
+    args_type.push_back(irb_->getJType(RemapShorty(cu_->shorty[i])));
+  }
+
+  return ::llvm::FunctionType::get(ret_type, args_type, false);
+}
+
+bool MirConverter::CreateFunction() {
+  ::llvm::FunctionType* func_type = GetFunctionType();
+  if (func_type == NULL) {
+    return false;
+  }
+
+  func_ = ::llvm::Function::Create(func_type,
+                                      ::llvm::Function::InternalLinkage,
+                                      symbol_, module_);
+
+  ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
+  ::llvm::Function::arg_iterator arg_end(func_->arg_end());
+
+  arg_iter->setName("method");
+  ++arg_iter;
+
+  int start_sreg = cu_->num_regs;
+
+  for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
+    arg_iter->setName(StringPrintf("v%i_0", start_sreg));
+    start_sreg += mir_graph_->reg_location_[start_sreg].wide ? 2 : 1;
+  }
+
+  return true;
+}
+
+bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb)
+{
+  // Skip the exit block
+  if ((bb->block_type == kDead) || (bb->block_type == kExitBlock)) {
+    id_to_block_map_.Put(bb->id, NULL);
+  } else {
+    int offset = bb->start_offset;
+    bool entry_block = (bb->block_type == kEntryBlock);
+    ::llvm::BasicBlock* llvm_bb =
+        ::llvm::BasicBlock::Create(*context_, entry_block ? "entry" :
+                                 StringPrintf(kLabelFormat, bb->catch_entry ? kCatchBlock :
+                                              kNormalBlock, offset, bb->id), func_);
+    if (entry_block) {
+        entry_bb_ = llvm_bb;
+        placeholder_bb_ =
+            ::llvm::BasicBlock::Create(*context_, "placeholder",
+                                     func_);
+    }
+    id_to_block_map_.Put(bb->id, llvm_bb);
+  }
+  return false;
+}
+
+
+/*
+ * Convert MIR to LLVM_IR
+ *  o For each ssa name, create an LLVM named value.  Type these
+ *    appropriately, and ignore high half of wide and double operands.
+ *  o For each MIR basic block, create an LLVM basic block.
+ *  o Iterate through the MIR a basic block at a time, setting arguments
+ *    to the recovered ssa names.
+ */
+void MirConverter::MethodMIR2Bitcode()
+{
+  InitIR();
+
+  // Create the function
+  CreateFunction();
+
+  // Create an LLVM basic block for each MIR block in dfs preorder
+  PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CreateLLVMBasicBlock(bb);
+  }
+
+  /*
+   * Create an llvm named value for each MIR SSA name.  Note: we'll use
+   * placeholders for all non-argument values (because we haven't seen
+   * the definition yet).
+   */
+  irb_->SetInsertPoint(placeholder_bb_);
+  ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
+  arg_iter++;  /* Skip the method object */
+  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
+    ::llvm::Value* val;
+    RegLocation rl_temp = mir_graph_->reg_location_[i];
+    if ((mir_graph_->SRegToVReg(i) < 0) || rl_temp.high_word) {
+      llvm_values_.Insert(0);
+    } else if ((i < cu_->num_regs) ||
+               (i >= (cu_->num_regs + cu_->num_ins))) {
+      ::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
+         irb_->getJLong(0) : irb_->getJInt(0);
+      val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
+      val->setName(mir_graph_->GetSSAName(i));
+      llvm_values_.Insert(val);
+    } else {
+      // Recover previously-created argument values
+      ::llvm::Value* arg_val = arg_iter++;
+      llvm_values_.Insert(arg_val);
+    }
+  }
+
+  PreOrderDfsIterator iter2(mir_graph_, false /* not iterative */);
+  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    BlockBitcodeConversion(bb);
+  }
+
+  /*
+   * In a few rare cases of verification failure, the verifier will
+   * replace one or more Dalvik opcodes with the special
+   * throw-verification-failure opcode.  This can leave the SSA graph
+   * in an invalid state, as definitions may be lost, while uses retained.
+   * To work around this problem, we insert placeholder definitions for
+   * all Dalvik SSA regs in the "placeholder" block.  Here, after
+   * bitcode conversion is complete, we examine those placeholder definitions
+   * and delete any with no references (which normally is all of them).
+   *
+   * If any definitions remain, we link the placeholder block into the
+   * CFG.  Otherwise, it is deleted.
+   */
+  for (::llvm::BasicBlock::iterator it = placeholder_bb_->begin(),
+       it_end = placeholder_bb_->end(); it != it_end;) {
+    ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(it++);
+    DCHECK(inst != NULL);
+    ::llvm::Value* val = ::llvm::dyn_cast< ::llvm::Value>(inst);
+    DCHECK(val != NULL);
+    if (val->getNumUses() == 0) {
+      inst->eraseFromParent();
+    }
+  }
+  SetDexOffset(0);
+  if (placeholder_bb_->empty()) {
+    placeholder_bb_->eraseFromParent();
+  } else {
+    irb_->SetInsertPoint(placeholder_bb_);
+    irb_->CreateBr(entry_target_bb_);
+    entry_target_bb_ = placeholder_bb_;
+  }
+  irb_->SetInsertPoint(entry_bb_);
+  irb_->CreateBr(entry_target_bb_);
+
+  if (cu_->enable_debug & (1 << kDebugVerifyBitcode)) {
+     if (::llvm::verifyFunction(*func_, ::llvm::PrintMessageAction)) {
+       LOG(INFO) << "Bitcode verification FAILED for "
+                 << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                 << " of size " << cu_->code_item->insns_size_in_code_units_;
+       cu_->enable_debug |= (1 << kDebugDumpBitcodeFile);
+     }
+  }
+
+  if (cu_->enable_debug & (1 << kDebugDumpBitcodeFile)) {
+    // Write bitcode to file
+    std::string errmsg;
+    std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
+    mir_graph_->ReplaceSpecialChars(fname);
+    // TODO: make configurable; change the naming mechanism to avoid fname length issues.
+    fname = StringPrintf("/sdcard/Bitcode/%s.bc", fname.c_str());
+
+    if (fname.size() > 240) {
+      LOG(INFO) << "Warning: bitcode filename too long. Truncated.";
+      fname.resize(240);
+    }
+
+    ::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
+        new ::llvm::tool_output_file(fname.c_str(), errmsg,
+                                   ::llvm::raw_fd_ostream::F_Binary));
+
+    if (!errmsg.empty()) {
+      LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
+    }
+
+    ::llvm::WriteBitcodeToFile(module_, out_file->os());
+    out_file->keep();
+  }
+}
+
+Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                               ArenaAllocator* const arena,
+                               llvm::LlvmCompilationUnit* const llvm_compilation_unit) {
+  return new MirConverter(cu, mir_graph, arena, llvm_compilation_unit);
+}
+
+}  // namespace art
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
new file mode 100644
index 0000000..8aa0271
--- /dev/null
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
+#define ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
+
+#include "invoke_type.h"
+#include "compiled_method.h"
+#include "dex/compiler_enums.h"
+#include "dex/compiler_ir.h"
+#include "dex/backend.h"
+#include "llvm/llvm_compilation_unit.h"
+#include "safe_map.h"
+
+namespace art {
+
+struct BasicBlock;
+struct CallInfo;
+struct CompilationUnit;
+struct MIR;
+struct RegLocation;
+struct RegisterInfo;
+class MIRGraph;
+
+// Target-specific initialization.
+Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                               ArenaAllocator* const arena,
+                               llvm::LlvmCompilationUnit* const llvm_compilation_unit);
+
+class MirConverter : public Backend {
+
+  public:
+    // TODO: flesh out and integrate into new world order.
+    MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
+                 llvm::LlvmCompilationUnit* llvm_compilation_unit)
+      : Backend(arena),
+        cu_(cu),
+        mir_graph_(mir_graph),
+        llvm_compilation_unit_(llvm_compilation_unit),
+        llvm_info_(llvm_compilation_unit->GetQuickContext()),
+        symbol_(llvm_compilation_unit->GetDexCompilationUnit()->GetSymbol()),
+        context_(NULL),
+        module_(NULL),
+        func_(NULL),
+        intrinsic_helper_(NULL),
+        irb_(NULL),
+        placeholder_bb_(NULL),
+        entry_bb_(NULL),
+        entry_target_bb_(NULL),
+        llvm_values_(arena, mir_graph->GetNumSSARegs()),
+        temp_name_(0),
+        current_dalvik_offset_(0) {
+      if (kIsDebugBuild) {
+        cu->enable_debug |= (1 << kDebugVerifyBitcode);
+      }
+    }
+
+    void Materialize() {
+      MethodMIR2Bitcode();
+    }
+
+    CompiledMethod* GetCompiledMethod() {
+      return NULL;
+    }
+
+  private:
+    ::llvm::BasicBlock* GetLLVMBlock(int id);
+    ::llvm::Value* GetLLVMValue(int s_reg);
+    void SetVregOnValue(::llvm::Value* val, int s_reg);
+    void DefineValueOnly(::llvm::Value* val, int s_reg);
+    void DefineValue(::llvm::Value* val, int s_reg);
+    ::llvm::Type* LlvmTypeFromLocRec(RegLocation loc);
+    void InitIR();
+    ::llvm::BasicBlock* FindCaseTarget(uint32_t vaddr);
+    void ConvertPackedSwitch(BasicBlock* bb, int32_t table_offset,
+                             RegLocation rl_src);
+    void ConvertSparseSwitch(BasicBlock* bb, int32_t table_offset,
+                             RegLocation rl_src);
+    void ConvertSget(int32_t field_index,
+                     art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
+    void ConvertSput(int32_t field_index,
+                     art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src);
+    void ConvertFillArrayData(int32_t offset, RegLocation rl_array);
+    ::llvm::Value* EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
+                             RegLocation loc);
+    void EmitPopShadowFrame();
+    ::llvm::Value* EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
+                            RegLocation loc);
+    void ConvertMoveException(RegLocation rl_dest);
+    void ConvertThrow(RegLocation rl_src);
+    void ConvertMonitorEnterExit(int opt_flags,
+                                 art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src);
+    void ConvertArrayLength(int opt_flags, RegLocation rl_dest,
+                            RegLocation rl_src);
+    void EmitSuspendCheck();
+    ::llvm::Value* ConvertCompare(ConditionCode cc,
+                                  ::llvm::Value* src1, ::llvm::Value* src2);
+    void ConvertCompareAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    void ConvertCompareZeroAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
+                                     RegLocation rl_src1);
+    ::llvm::Value* GenDivModOp(bool is_div, bool is_long, ::llvm::Value* src1,
+                               ::llvm::Value* src2);
+    ::llvm::Value* GenArithOp(OpKind op, bool is_long, ::llvm::Value* src1,
+                              ::llvm::Value* src2);
+    void ConvertFPArithOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    void ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
+                      RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
+                         RegLocation rl_dest, RegLocation rl_src, int shift_amount);
+    void ConvertArithOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2);
+    void ConvertArithOpLit(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                           int32_t imm);
+    void ConvertInvoke(BasicBlock* bb, MIR* mir, InvokeType invoke_type,
+                       bool is_range, bool is_filled_new_array);
+    void ConvertConstObject(uint32_t idx,
+                            art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
+    void ConvertCheckCast(uint32_t type_idx, RegLocation rl_src);
+    void ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest);
+    void ConvertNewArray(uint32_t type_idx, RegLocation rl_dest,
+                         RegLocation rl_src);
+    void ConvertAget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index);
+    void ConvertAput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_src, RegLocation rl_array, RegLocation rl_index);
+    void ConvertIget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_dest, RegLocation rl_obj, int field_index);
+    void ConvertIput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_src, RegLocation rl_obj, int field_index);
+    void ConvertInstanceOf(uint32_t type_idx, RegLocation rl_dest,
+                           RegLocation rl_src);
+    void ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
+                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
+                             art::llvm::IntrinsicHelper::IntrinsicId id);
+    void ConvertNeg(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest, RegLocation rl_src);
+    void ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
+                        RegLocation rl_dest, RegLocation rl_src);
+    void ConvertNegFP(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertNot(RegLocation rl_dest, RegLocation rl_src);
+    void EmitConstructorBarrier();
+    bool ConvertMIRNode(MIR* mir, BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
+    void SetDexOffset(int32_t offset);
+    void SetMethodInfo();
+    void HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
+    void ConvertExtendedMIR(BasicBlock* bb, MIR* mir, ::llvm::BasicBlock* llvm_bb);
+    bool BlockBitcodeConversion(BasicBlock* bb);
+    ::llvm::FunctionType* GetFunctionType();
+    bool CreateFunction();
+    bool CreateLLVMBasicBlock(BasicBlock* bb);
+    void MethodMIR2Bitcode();
+
+    CompilationUnit* cu_;
+    MIRGraph* mir_graph_;
+    llvm::LlvmCompilationUnit* const llvm_compilation_unit_;
+    LLVMInfo* llvm_info_;
+    std::string symbol_;
+    ::llvm::LLVMContext* context_;
+    ::llvm::Module* module_;
+    ::llvm::Function* func_;
+    art::llvm::IntrinsicHelper* intrinsic_helper_;
+    art::llvm::IRBuilder* irb_;
+    ::llvm::BasicBlock* placeholder_bb_;
+    ::llvm::BasicBlock* entry_bb_;
+    ::llvm::BasicBlock* entry_target_bb_;
+    std::string bitcode_filename_;
+    GrowableArray< ::llvm::Value*> llvm_values_;
+    int32_t temp_name_;
+    SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map_;  // block id -> llvm bb.
+    int current_dalvik_offset_;
+};  // Class MirConverter
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
new file mode 100644
index 0000000..9dd7daf
--- /dev/null
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_ARM_ARMLIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_ARM_ARMLIR_H_
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register usage conventions.
+ *
+ * r0-r3: Argument registers in both Dalvik and C/C++ conventions.
+ *        However, for Dalvik->Dalvik calls we'll pass the target's Method*
+ *        pointer in r0 as a hidden arg0. Otherwise used as codegen scratch
+ *        registers.
+ * r0-r1: As in C/C++ r0 is 32-bit return register and r0/r1 is 64-bit
+ * r4   : (rARM_SUSPEND) is reserved (suspend check/debugger assist)
+ * r5   : Callee save (promotion target)
+ * r6   : Callee save (promotion target)
+ * r7   : Callee save (promotion target)
+ * r8   : Callee save (promotion target)
+ * r9   : (rARM_SELF) is reserved (pointer to thread-local storage)
+ * r10  : Callee save (promotion target)
+ * r11  : Callee save (promotion target)
+ * r12  : Scratch, may be trashed by linkage stubs
+ * r13  : (sp) is reserved
+ * r14  : (lr) is reserved
+ * r15  : (pc) is reserved
+ *
+ * 5 core temps that codegen can use (r0, r1, r2, r3, r12)
+ * 7 core registers that can be used for promotion
+ *
+ * Floating pointer registers
+ * s0-s31
+ * d0-d15, where d0={s0,s1}, d1={s2,s3}, ... , d15={s30,s31}
+ *
+ * s16-s31 (d8-d15) preserved across C calls
+ * s0-s15 (d0-d7) trashed across C calls
+ *
+ * s0-s15/d0-d7 used as codegen temp/scratch
+ * s16-s31/d8-d15 can be used for promotion.
+ *
+ * Calling convention
+ *     o On a call to a Dalvik method, pass target's Method* in r0
+ *     o r1-r3 will be used for up to the first 3 words of arguments
+ *     o Arguments past the first 3 words will be placed in appropriate
+ *       out slots by the caller.
+ *     o If a 64-bit argument would span the register/memory argument
+ *       boundary, it will instead be fully passed in the frame.
+ *     o Maintain a 16-byte stack alignment
+ *
+ *  Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1]              |  {Note: resides in caller's frame}
+ * |       .                |
+ * | IN[0]                  |
+ * | caller's Method*       |
+ * +========================+  {Note: start of callee's frame}
+ * | spill region           |  {variable sized - will include lr if non-leaf.}
+ * +------------------------+
+ * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1]            |
+ * | V[locals-2]            |
+ * |      .                 |
+ * |      .                 |
+ * | V[1]                   |
+ * | V[0]                   |
+ * +------------------------+
+ * |  0 to 3 words padding  |
+ * +------------------------+
+ * | OUT[outs-1]            |
+ * | OUT[outs-2]            |
+ * |       .                |
+ * | OUT[0]                 |
+ * | cur_method*            | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+// Offset to distinguish FP regs.
+#define ARM_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define ARM_FP_DOUBLE 64
+// First FP callee save.
+#define ARM_FP_CALLEE_SAVE_BASE 16
+// Reg types.
+#define ARM_REGTYPE(x) (x & (ARM_FP_REG_OFFSET | ARM_FP_DOUBLE))
+#define ARM_FPREG(x) ((x & ARM_FP_REG_OFFSET) == ARM_FP_REG_OFFSET)
+#define ARM_LOWREG(x) ((x & 0x7) == x)
+#define ARM_DOUBLEREG(x) ((x & ARM_FP_DOUBLE) == ARM_FP_DOUBLE)
+#define ARM_SINGLEREG(x) (ARM_FPREG(x) && !ARM_DOUBLEREG(x))
+
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but both names are required so that
+ * asserts can verify the pair is consecutive if significant rework is
+ * done in this area.  It is also a good reminder in the calling code
+ * that reg locations always describe doubles as a pair of singles.
+ */
+#define ARM_S2D(x,y) ((x) | ARM_FP_DOUBLE)
+// Mask to strip off fp flags.
+#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
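+
+/*
+ * Worked example of the encodings above (illustrative values): fr2 is
+ * 2 + ARM_FP_REG_OFFSET = 34 and dr1 = ARM_S2D(fr2, fr3) = 34 | 64 = 98.
+ * ARM_FPREG(dr1) and ARM_DOUBLEREG(dr1) are both true, while
+ * (dr1 & ARM_FP_REG_MASK) recovers the hardware register number 2, the
+ * low single (s2) of the d1 pair.
+ */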
+
+// RegisterLocation templates return values (r0, or r0/r1).
+#define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r0, INVALID_REG,\
+                          INVALID_SREG, INVALID_SREG}
+#define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1, \
+                               INVALID_SREG, INVALID_SREG}
+#define ARM_LOC_C_RETURN_FLOAT  ARM_LOC_C_RETURN
+#define ARM_LOC_C_RETURN_DOUBLE  ARM_LOC_C_RETURN_WIDE
+
+enum ArmResourceEncodingPos {
+  kArmGPReg0   = 0,
+  kArmRegSP    = 13,
+  kArmRegLR    = 14,
+  kArmRegPC    = 15,
+  kArmFPReg0   = 16,
+  kArmFPReg16  = 32,
+  kArmRegEnd   = 48,
+};
+
+#define ENCODE_ARM_REG_LIST(N)      (static_cast<uint64_t>(N))
+#define ENCODE_ARM_REG_SP           (1ULL << kArmRegSP)
+#define ENCODE_ARM_REG_LR           (1ULL << kArmRegLR)
+#define ENCODE_ARM_REG_PC           (1ULL << kArmRegPC)
+#define ENCODE_ARM_REG_FPCS_LIST(N) (static_cast<uint64_t>(N) << kArmFPReg16)
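+
+/*
+ * Example (illustrative only): a "push {r4-r7, lr}" would contribute a
+ * use mask of ENCODE_ARM_REG_LIST(0xf0) | ENCODE_ARM_REG_LR, i.e. bits
+ * 4..7 plus bit kArmRegLR (14) set; the sp def/use is tracked separately.
+ */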
+
+enum ArmNativeRegisterPool {
+  r0   = 0,
+  r1   = 1,
+  r2   = 2,
+  r3   = 3,
+  rARM_SUSPEND = 4,
+  r5   = 5,
+  r6   = 6,
+  r7   = 7,
+  r8   = 8,
+  rARM_SELF  = 9,
+  r10  = 10,
+  r11  = 11,
+  r12  = 12,
+  r13sp  = 13,
+  rARM_SP  = 13,
+  r14lr  = 14,
+  rARM_LR  = 14,
+  r15pc  = 15,
+  rARM_PC  = 15,
+  fr0  =  0 + ARM_FP_REG_OFFSET,
+  fr1  =  1 + ARM_FP_REG_OFFSET,
+  fr2  =  2 + ARM_FP_REG_OFFSET,
+  fr3  =  3 + ARM_FP_REG_OFFSET,
+  fr4  =  4 + ARM_FP_REG_OFFSET,
+  fr5  =  5 + ARM_FP_REG_OFFSET,
+  fr6  =  6 + ARM_FP_REG_OFFSET,
+  fr7  =  7 + ARM_FP_REG_OFFSET,
+  fr8  =  8 + ARM_FP_REG_OFFSET,
+  fr9  =  9 + ARM_FP_REG_OFFSET,
+  fr10 = 10 + ARM_FP_REG_OFFSET,
+  fr11 = 11 + ARM_FP_REG_OFFSET,
+  fr12 = 12 + ARM_FP_REG_OFFSET,
+  fr13 = 13 + ARM_FP_REG_OFFSET,
+  fr14 = 14 + ARM_FP_REG_OFFSET,
+  fr15 = 15 + ARM_FP_REG_OFFSET,
+  fr16 = 16 + ARM_FP_REG_OFFSET,
+  fr17 = 17 + ARM_FP_REG_OFFSET,
+  fr18 = 18 + ARM_FP_REG_OFFSET,
+  fr19 = 19 + ARM_FP_REG_OFFSET,
+  fr20 = 20 + ARM_FP_REG_OFFSET,
+  fr21 = 21 + ARM_FP_REG_OFFSET,
+  fr22 = 22 + ARM_FP_REG_OFFSET,
+  fr23 = 23 + ARM_FP_REG_OFFSET,
+  fr24 = 24 + ARM_FP_REG_OFFSET,
+  fr25 = 25 + ARM_FP_REG_OFFSET,
+  fr26 = 26 + ARM_FP_REG_OFFSET,
+  fr27 = 27 + ARM_FP_REG_OFFSET,
+  fr28 = 28 + ARM_FP_REG_OFFSET,
+  fr29 = 29 + ARM_FP_REG_OFFSET,
+  fr30 = 30 + ARM_FP_REG_OFFSET,
+  fr31 = 31 + ARM_FP_REG_OFFSET,
+  dr0 = fr0 + ARM_FP_DOUBLE,
+  dr1 = fr2 + ARM_FP_DOUBLE,
+  dr2 = fr4 + ARM_FP_DOUBLE,
+  dr3 = fr6 + ARM_FP_DOUBLE,
+  dr4 = fr8 + ARM_FP_DOUBLE,
+  dr5 = fr10 + ARM_FP_DOUBLE,
+  dr6 = fr12 + ARM_FP_DOUBLE,
+  dr7 = fr14 + ARM_FP_DOUBLE,
+  dr8 = fr16 + ARM_FP_DOUBLE,
+  dr9 = fr18 + ARM_FP_DOUBLE,
+  dr10 = fr20 + ARM_FP_DOUBLE,
+  dr11 = fr22 + ARM_FP_DOUBLE,
+  dr12 = fr24 + ARM_FP_DOUBLE,
+  dr13 = fr26 + ARM_FP_DOUBLE,
+  dr14 = fr28 + ARM_FP_DOUBLE,
+  dr15 = fr30 + ARM_FP_DOUBLE,
+};
+
+// Target-independent aliases.
+#define rARM_ARG0 r0
+#define rARM_ARG1 r1
+#define rARM_ARG2 r2
+#define rARM_ARG3 r3
+#define rARM_FARG0 r0
+#define rARM_FARG1 r1
+#define rARM_FARG2 r2
+#define rARM_FARG3 r3
+#define rARM_RET0 r0
+#define rARM_RET1 r1
+#define rARM_INVOKE_TGT rARM_LR
+#define rARM_COUNT INVALID_REG
+
+enum ArmShiftEncodings {
+  kArmLsl = 0x0,
+  kArmLsr = 0x1,
+  kArmAsr = 0x2,
+  kArmRor = 0x3
+};
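+
+// These values match the standard ARM/Thumb2 shift-type field encodings;
+// e.g. an operand written as "r2, LSR #3" uses kArmLsr (0b01) as its type.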
+
+/*
+ * The following enum defines the list of Thumb instructions supported by the
+ * assembler. Their corresponding EncodingMap positions are defined in
+ * assemble_arm.cc.
+ */
+enum ArmOpcode {
+  kArmFirst = 0,
+  kArm16BitData = kArmFirst, // DATA   [0] rd[15..0].
+  kThumbAdcRR,       // adc   [0100000101] rm[5..3] rd[2..0].
+  kThumbAddRRI3,     // add(1)  [0001110] imm_3[8..6] rn[5..3] rd[2..0].
+  kThumbAddRI8,      // add(2)  [00110] rd[10..8] imm_8[7..0].
+  kThumbAddRRR,      // add(3)  [0001100] rm[8..6] rn[5..3] rd[2..0].
+  kThumbAddRRLH,     // add(4)  [01000100] H12[01] rm[5..3] rd[2..0].
+  kThumbAddRRHL,     // add(4)  [01001000] H12[10] rm[5..3] rd[2..0].
+  kThumbAddRRHH,     // add(4)  [01001100] H12[11] rm[5..3] rd[2..0].
+  kThumbAddPcRel,    // add(5)  [10100] rd[10..8] imm_8[7..0].
+  kThumbAddSpRel,    // add(6)  [10101] rd[10..8] imm_8[7..0].
+  kThumbAddSpI7,     // add(7)  [101100000] imm_7[6..0].
+  kThumbAndRR,       // and   [0100000000] rm[5..3] rd[2..0].
+  kThumbAsrRRI5,     // asr(1)  [00010] imm_5[10..6] rm[5..3] rd[2..0].
+  kThumbAsrRR,       // asr(2)  [0100000100] rs[5..3] rd[2..0].
+  kThumbBCond,       // b(1)  [1101] cond[11..8] offset_8[7..0].
+  kThumbBUncond,     // b(2)  [11100] offset_11[10..0].
+  kThumbBicRR,       // bic   [0100001110] rm[5..3] rd[2..0].
+  kThumbBkpt,        // bkpt  [10111110] imm_8[7..0].
+  kThumbBlx1,        // blx(1)  [111] H[10] offset_11[10..0].
+  kThumbBlx2,        // blx(2)  [111] H[01] offset_11[10..0].
+  kThumbBl1,         // bl(1)  [111] H[10] offset_11[10..0].
+  kThumbBl2,         // bl(2)  [111] H[11] offset_11[10..0].
+  kThumbBlxR,        // blx(2)  [010001111] rm[6..3] [000].
+  kThumbBx,          // bx    [010001110] H2[6..6] rm[5..3] SBZ[000].
+  kThumbCmnRR,       // cmn   [0100001011] rm[5..3] rd[2..0].
+  kThumbCmpRI8,      // cmp(1)  [00101] rn[10..8] imm_8[7..0].
+  kThumbCmpRR,       // cmp(2)  [0100001010] rm[5..3] rd[2..0].
+  kThumbCmpLH,       // cmp(3)  [01000101] H12[01] rm[5..3] rd[2..0].
+  kThumbCmpHL,       // cmp(3)  [01000110] H12[10] rm[5..3] rd[2..0].
+  kThumbCmpHH,       // cmp(3)  [01000111] H12[11] rm[5..3] rd[2..0].
+  kThumbEorRR,       // eor   [0100000001] rm[5..3] rd[2..0].
+  kThumbLdmia,       // ldmia   [11001] rn[10..8] reglist [7..0].
+  kThumbLdrRRI5,     // ldr(1)  [01101] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbLdrRRR,      // ldr(2)  [0101100] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrPcRel,    // ldr(3)  [01001] rd[10..8] imm_8[7..0].
+  kThumbLdrSpRel,    // ldr(4)  [10011] rd[10..8] imm_8[7..0].
+  kThumbLdrbRRI5,    // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbLdrbRRR,     // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrhRRI5,    // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbLdrhRRR,     // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrsbRRR,    // ldrsb   [0101011] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrshRRR,    // ldrsh   [0101111] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLslRRI5,     // lsl(1)  [00000] imm_5[10..6] rm[5..3] rd[2..0].
+  kThumbLslRR,       // lsl(2)  [0100000010] rs[5..3] rd[2..0].
+  kThumbLsrRRI5,     // lsr(1)  [00001] imm_5[10..6] rm[5..3] rd[2..0].
+  kThumbLsrRR,       // lsr(2)  [0100000011] rs[5..3] rd[2..0].
+  kThumbMovImm,      // mov(1)  [00100] rd[10..8] imm_8[7..0].
+  kThumbMovRR,       // mov(2)  [0001110000] rn[5..3] rd[2..0].
+  kThumbMovRR_H2H,   // mov(3)  [01000111] H12[11] rm[5..3] rd[2..0].
+  kThumbMovRR_H2L,   // mov(3)  [01000110] H12[01] rm[5..3] rd[2..0].
+  kThumbMovRR_L2H,   // mov(3)  [01000101] H12[10] rm[5..3] rd[2..0].
+  kThumbMul,         // mul   [0100001101] rm[5..3] rd[2..0].
+  kThumbMvn,         // mvn   [0100001111] rm[5..3] rd[2..0].
+  kThumbNeg,         // neg   [0100001001] rm[5..3] rd[2..0].
+  kThumbOrr,         // orr   [0100001100] rm[5..3] rd[2..0].
+  kThumbPop,         // pop   [1011110] r[8..8] rl[7..0].
+  kThumbPush,        // push  [1011010] r[8..8] rl[7..0].
+  kThumbRorRR,       // ror   [0100000111] rs[5..3] rd[2..0].
+  kThumbSbc,         // sbc   [0100000110] rm[5..3] rd[2..0].
+  kThumbStmia,       // stmia   [11000] rn[10..8] reglist [7.. 0].
+  kThumbStrRRI5,     // str(1)  [01100] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbStrRRR,      // str(2)  [0101000] rm[8..6] rn[5..3] rd[2..0].
+  kThumbStrSpRel,    // str(3)  [10010] rd[10..8] imm_8[7..0].
+  kThumbStrbRRI5,    // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbStrbRRR,     // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0].
+  kThumbStrhRRI5,    // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbStrhRRR,     // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0].
+  kThumbSubRRI3,     // sub(1)  [0001111] imm_3[8..6] rn[5..3] rd[2..0].
+  kThumbSubRI8,      // sub(2)  [00111] rd[10..8] imm_8[7..0].
+  kThumbSubRRR,      // sub(3)  [0001101] rm[8..6] rn[5..3] rd[2..0].
+  kThumbSubSpI7,     // sub(4)  [101100001] imm_7[6..0].
+  kThumbSwi,         // swi   [11011111] imm_8[7..0].
+  kThumbTst,         // tst   [0100001000] rm[5..3] rn[2..0].
+  kThumb2Vldrs,      // vldr low  sx [111011011001] rn[19..16] rd[15-12] [1010] imm_8[7..0].
+  kThumb2Vldrd,      // vldr low  dx [111011011001] rn[19..16] rd[15-12] [1011] imm_8[7..0].
+  kThumb2Vmuls,      // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10100000] rm[3..0].
+  kThumb2Vmuld,      // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10110000] rm[3..0].
+  kThumb2Vstrs,      // vstr low  sx [111011011000] rn[19..16] rd[15-12] [1010] imm_8[7..0].
+  kThumb2Vstrd,      // vstr low  dx [111011011000] rn[19..16] rd[15-12] [1011] imm_8[7..0].
+  kThumb2Vsubs,      // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100100] rm[3..0].
+  kThumb2Vsubd,      // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110100] rm[3..0].
+  kThumb2Vadds,      // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100000] rm[3..0].
+  kThumb2Vaddd,      // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110000] rm[3..0].
+  kThumb2Vdivs,      // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10100000] rm[3..0].
+  kThumb2Vdivd,      // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10110000] rm[3..0].
+  kThumb2VcvtIF,     // vcvt.F32 vd, vm [1110111010111000] vd[15..12] [10101100] vm[3..0].
+  kThumb2VcvtID,     // vcvt.F64 vd, vm [1110111010111000] vd[15..12] [10111100] vm[3..0].
+  kThumb2VcvtFI,     // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10101100] vm[3..0].
+  kThumb2VcvtDI,     // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10111100] vm[3..0].
+  kThumb2VcvtFd,     // vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12] [10101100] vm[3..0].
+  kThumb2VcvtDF,     // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0].
+  kThumb2Vsqrts,     // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0].
+  kThumb2Vsqrtd,     // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0].
+  kThumb2MovImmShift,// mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
+  kThumb2MovImm16,   // mov(T3) rd, #<const> [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8.
+  kThumb2StrRRI12,   // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
+  kThumb2LdrRRI12,   // ldr(Imm,T3) rd,[rn,#imm12] [111110001101] rn[19..16] rt[15..12] imm12[11..0].
+  kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0].
+  kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0].
+  kThumb2Cbnz,       // cbnz rd,<label> [101110] i [1] imm5[7..3] rn[2..0].
+  kThumb2Cbz,        // cbz rd,<label> [101100] i [1] imm5[7..3] rn[2..0].
+  kThumb2AddRRI12,   // add rd, rn, #imm12 [11110] i [100000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+  kThumb2MovRR,      // mov rd, rm [11101010010011110000] rd[11..8] [0000] rm[3..0].
+  kThumb2Vmovs,      // vmov.f32 vd, vm [111011101] D [110000] vd[15..12] [101001] M [0] vm[3..0].
+  kThumb2Vmovd,      // vmov.f64 vd, vm [111011101] D [110000] vd[15..12] [101101] M [0] vm[3..0].
+  kThumb2Ldmia,      // ldmia  [111010001001] rn[19..16] mask[15..0].
+  kThumb2Stmia,      // stmia  [111010001000] rn[19..16] mask[15..0].
+  kThumb2AddRRR,     // add [111010110000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2SubRRR,     // sub [111010111010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2SbcRRR,     // sbc [111010110110] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2CmpRR,      // cmp [111010111011] rn[19..16] [0000] [1111] [0000] rm[3..0].
+  kThumb2SubRRI12,   // sub rd, rn, #imm12 [11110] i [01010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+  kThumb2MvnImm12,   // mvn rd, #<const> [11110] i [00011011110] imm3 rd[11..8] imm8.
+  kThumb2Sel,        // sel rd, rn, rm [111110101010] rn[19-16] rd[11-8] rm[3-0].
+  kThumb2Ubfx,       // ubfx rd,rn,#lsb,#width [111100111100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
+  kThumb2Sbfx,       // sbfx rd,rn,#lsb,#width [111100110100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
+  kThumb2LdrRRR,     // ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrhRRR,    // ldrh rt,[rn,rm,LSL #imm] [111110000011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrshRRR,   // ldrsh rt,[rn,rm,LSL #imm] [111110010011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrbRRR,    // ldrb rt,[rn,rm,LSL #imm] [111110000001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrsbRRR,   // ldrsb rt,[rn,rm,LSL #imm] [111110010001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2StrRRR,     // str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2StrhRRR,    // strh rt,[rn,rm,LSL #imm] [111110000010] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2StrbRRR,    // strb rt,[rn,rm,LSL #imm] [111110000000] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrhRRI12,  // ldrh rt,[rn,#imm12] [111110001011] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrshRRI12, // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrbRRI12,  // ldrb rt,[rn,#imm12] [111110001001] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrsbRRI12, // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2StrhRRI12,  // strh rt,[rn,#imm12] [111110001010] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2StrbRRI12,  // strb rt,[rn,#imm12] [111110001000] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2Pop,        // pop   [1110100010111101] list[15-0].
+  kThumb2Push,       // push  [1110100100101101] list[15-0].
+  kThumb2CmpRI12,    // cmp rn, #<const> [11110] i [011011] rn[19-16] [0] imm3 [1111] imm8[7..0].
+  kThumb2AdcRRR,     // adc [111010110101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2AndRRR,     // and [111010100000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2BicRRR,     // bic [111010100010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2CmnRR,      // cmn [111010110001] rn[19..16] [0000] [1111] [0000] rm[3..0].
+  kThumb2EorRRR,     // eor [111010101000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2MulRRR,     // mul [111110110000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2MnvRR,      // mvn [11101010011011110] rd[11-8] [0000] rm[3..0].
+  kThumb2RsubRRI8,   // rsub [111100011100] rn[19..16] [0000] rd[11..8] imm8[7..0].
+  kThumb2NegRR,      // actually rsub rd, rn, #0.
+  kThumb2OrrRRR,     // orr [111010100100] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2TstRR,      // tst [111010100001] rn[19..16] [0000] [1111] [0000] rm[3..0].
+  kThumb2LslRRR,     // lsl [111110100000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2LsrRRR,     // lsr [111110100010] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2AsrRRR,     // asr [111110100100] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2RorRRR,     // ror [111110100110] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2LslRRI5,    // lsl [11101010010011110] imm[14..12] rd[11..8] [00] rm[3..0].
+  kThumb2LsrRRI5,    // lsr [11101010010011110] imm[14..12] rd[11..8] [01] rm[3..0].
+  kThumb2AsrRRI5,    // asr [11101010010011110] imm[14..12] rd[11..8] [10] rm[3..0].
+  kThumb2RorRRI5,    // ror [11101010010011110] imm[14..12] rd[11..8] [11] rm[3..0].
+  kThumb2BicRRI8,    // bic [111100000010] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2AndRRI8,    // and [111100000000] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2OrrRRI8,    // orr [111100000100] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2EorRRI8,    // eor [111100001000] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2AddRRI8,    // add [111100001000] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2AdcRRI8,    // adc [111100010101] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2SubRRI8,    // sub [111100011011] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2SbcRRI8,    // sbc [111100010111] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2It,         // it [10111111] firstcond[7-4] mask[3-0].
+  kThumb2Fmstat,     // fmstat [11101110111100011111101000010000].
+  kThumb2Vcmpd,      // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
+  kThumb2Vcmps,      // vcmp [111011101] D [11010] rd[15-12] [1011] E [1] M [0] rm[3-0].
+  kThumb2LdrPcRel12, // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+  kThumb2BCond,      // b<c> [1110] S cond[25-22] imm6[21-16] [10] J1 [0] J2 imm11[10..0].
+  kThumb2Vmovd_RR,   // vmov [111011101] D [110000] vd[15-12] [101101] M [0] vm[3-0].
+  kThumb2Vmovs_RR,   // vmov [111011101] D [110000] vd[15-12] [101001] M [0] vm[3-0].
+  kThumb2Fmrs,       // vmov [111011100000] vn[19-16] rt[15-12] [1010] N [0010000].
+  kThumb2Fmsr,       // vmov [111011100001] vn[19-16] rt[15-12] [1010] N [0010000].
+  kThumb2Fmrrd,      // vmov [111011000100] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
+  kThumb2Fmdrr,      // vmov [111011000101] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
+  kThumb2Vabsd,      // vabs.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
+  kThumb2Vabss,      // vabs.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
+  kThumb2Vnegd,      // vneg.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
+  kThumb2Vnegs,      // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
+  kThumb2Vmovs_IMM8, // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
+  kThumb2Vmovd_IMM8, // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
+  kThumb2Mla,        // mla [111110110000] rn[19-16] ra[15-12] rd[7-4] [0000] rm[3-0].
+  kThumb2Umull,      // umull [111110111010] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
+  kThumb2Ldrex,      // ldrex [111010000101] rn[19-16] rt[11-8] [1111] imm8[7-0].
+  kThumb2Strex,      // strex [111010000100] rn[19-16] rt[11-8] rd[11-8] imm8[7-0].
+  kThumb2Clrex,      // clrex [11110011101111111000111100101111].
+  kThumb2Bfi,        // bfi [111100110110] rn[19-16] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
+  kThumb2Bfc,        // bfc [11110011011011110] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
+  kThumb2Dmb,        // dmb [1111001110111111100011110101] option[3-0].
+  kThumb2LdrPcReln12,// ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+  kThumb2Stm,        // stm <list> [111010010000] rn[19-16] 000 rl[12-0].
+  kThumbUndefined,   // undefined [11011110xxxxxxxx].
+  kThumb2VPopCS,     // vpop <list of callee save fp singles (s16+)>.
+  kThumb2VPushCS,    // vpush <list of callee save fp singles (s16+)>.
+  kThumb2Vldms,      // vldms rd, <list>.
+  kThumb2Vstms,      // vstms rd, <list>.
+  kThumb2BUncond,    // b <label>.
+  kThumb2MovImm16H,  // similar to kThumb2MovImm16, but target high hw.
+  kThumb2AddPCR,     // Thumb2 2-operand add with hard-coded PC target.
+  kThumb2Adr,        // Special purpose encoding of ADR for switch tables.
+  kThumb2MovImm16LST,// Special purpose version for switch table use.
+  kThumb2MovImm16HST,// Special purpose version for switch table use.
+  kThumb2LdmiaWB,    // ldmia  [111010011001] rn[19..16] mask[15..0].
+  kThumb2SubsRRI12,  // setflags encoding.
+  kThumb2OrrRRRs,    // orrx [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2Push1,      // t3 encoding of push.
+  kThumb2Pop1,       // t3 encoding of pop.
+  kThumb2RsubRRR,    // rsb [111010111101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2Smull,      // smull [111110111000] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
+  kThumb2LdrdPcRel8, // ldrd rt, rt2, pc +/- 1024.
+  kThumb2LdrdI8,     // ldrd rt, rt2, [rn +/- 1024].
+  kThumb2StrdI8,     // strd rt, rt2, [rn +/- 1024].
+  kArmLast,
+};
+
+enum ArmOpDmbOptions {
+  kSY = 0xf,
+  kST = 0xe,
+  kISH = 0xb,
+  kISHST = 0xa,
+  kNSH = 0x7,
+  kNSHST = 0x6
+};
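+
+// Example (illustrative only): a "dmb ish" barrier would be formed by
+// placing kISH (0xb) into the option[3-0] field of the kThumb2Dmb skeleton.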
+
+// Instruction assembly field_loc kind.
+enum ArmEncodingKind {
+  kFmtUnused,
+  kFmtBitBlt,    // Bit string using end/start.
+  kFmtDfp,       // Double FP reg.
+  kFmtSfp,       // Single FP reg.
+  kFmtModImm,    // Shifted 8-bit immed using [26,14..12,7..0].
+  kFmtImm16,     // Zero-extended immed using [26,19..16,14..12,7..0].
+  kFmtImm6,      // Encoded branch target using [9,7..3]0.
+  kFmtImm12,     // Zero-extended immediate using [26,14..12,7..0].
+  kFmtShift,     // Shift descriptor, [14..12,7..4].
+  kFmtLsb,       // least significant bit using [14..12][7..6].
+  kFmtBWidth,    // bit-field width, encoded as width-1.
+  kFmtShift5,    // Shift count, [14..12,7..6].
+  kFmtBrOffset,  // Signed extended [26,11,13,21-16,10-0]:0.
+  kFmtFPImm,     // Encoded floating point immediate.
+  kFmtOff24,     // 24-bit Thumb2 unconditional branch encoding.
+};
+
+// Struct used to define the snippet positions for each Thumb opcode.
+struct ArmEncodingMap {
+  uint32_t skeleton;
+  struct {
+    ArmEncodingKind kind;
+    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
+    int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+  } field_loc[4];
+  ArmOpcode opcode;
+  uint64_t flags;
+  const char* name;
+  const char* fmt;
+  int size;   // Note: size is in bytes.
+};
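+
+/*
+ * Example of how field_loc is interpreted (illustrative only): a
+ * kFmtBitBlt entry with end=10 and start=6 inserts the operand value
+ * into bits 10..6 of the skeleton, while for kFmtSfp/kFmtDfp the two
+ * numbers give the positions of the 1-bit and 4-bit slices of the
+ * FP register number.
+ */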
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_ARM_ARMLIR_H_
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
new file mode 100644
index 0000000..e804215
--- /dev/null
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -0,0 +1,1397 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "dex/quick/mir_to_lir-inl.h"
+
+namespace art {
+
+/*
+ * opcode: ArmOpcode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+                     k3, k3s, k3e, flags, name, fmt, size) \
+        {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+                    {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
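+
+/*
+ * For illustration, the kThumbAddRRI3 entry below expands to:
+ *   {0x1c00, {{kFmtBitBlt, 2, 0}, {kFmtBitBlt, 5, 3}, {kFmtBitBlt, 8, 6},
+ *    {kFmtUnused, -1, -1}}, kThumbAddRRI3,
+ *    IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ *    "adds", "!0C, !1C, #!2d", 2}
+ */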
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ *     0 -> operands[0] (dest)
+ *     1 -> operands[1] (src1)
+ *     2 -> operands[2] (src2)
+ *     3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ *     h -> 4-digit hex
+ *     d -> decimal
+ *     E -> decimal*4
+ *     F -> decimal*2
+ *     c -> branch condition (beq, bne, etc.)
+ *     t -> pc-relative target
+ *     u -> 1st half of bl[x] target
+ *     v -> 2nd half of bl[x] target
+ *     R -> register list
+ *     s -> single precision floating point register
+ *     S -> double precision floating point register
+ *     m -> Thumb2 modified immediate
+ *     n -> complemented Thumb2 modified immediate
+ *     M -> Thumb2 16-bit zero-extended immediate
+ *     b -> 4-digit binary
+ *     B -> dmb option string (sy, st, ish, ishst, nsh, nshst)
+ *     H -> operand shift
+ *     C -> core register name
+ *     P -> fp cs register list (base of s16)
+ *     Q -> fp cs register list (base of s0)
+ *
+ *  [!] escape.  To insert "!", use "!!"
+ */
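+/*
+ * Example (illustrative operand values): with operands[] = {1, 2, 7},
+ * the kThumbAddRRI3 fmt string "!0C, !1C, #!2d" would be printed as
+ * "adds r1, r2, #7" in the instruction dump.
+ */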
+/* NOTE: must be kept in sync with enum ArmOpcode from LIR.h */
+const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
+    ENCODING_MAP(kArm16BitData,    0x0000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP, "data", "0x!0h(!0d)", 2),
+    ENCODING_MAP(kThumbAdcRR,        0x4140,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES | USES_CCODES,
+                 "adcs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbAddRRI3,      0x1c00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "adds", "!0C, !1C, #!2d", 2),
+    ENCODING_MAP(kThumbAddRI8,       0x3000,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
+                 "adds", "!0C, !0C, #!1d", 2),
+    ENCODING_MAP(kThumbAddRRR,       0x1800,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "adds", "!0C, !1C, !2C", 2),
+    ENCODING_MAP(kThumbAddRRLH,     0x4440,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
+                 "add", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbAddRRHL,     0x4480,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
+                 "add", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbAddRRHH,     0x44c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
+                 "add", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbAddPcRel,    0xa000,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | IS_BRANCH | NEEDS_FIXUP,
+                 "add", "!0C, pc, #!1E", 2),
+    ENCODING_MAP(kThumbAddSpRel,    0xa800,
+                 kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF_SP | REG_USE_SP,
+                 "add", "!0C, sp, #!2E", 2),
+    ENCODING_MAP(kThumbAddSpI7,      0xb000,
+                 kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
+                 "add", "sp, #!0d*4", 2),
+    ENCODING_MAP(kThumbAndRR,        0x4000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "ands", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbAsrRRI5,      0x1000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "asrs", "!0C, !1C, #!2d", 2),
+    ENCODING_MAP(kThumbAsrRR,        0x4100,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "asrs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbBCond,        0xd000,
+                 kFmtBitBlt, 7, 0, kFmtBitBlt, 11, 8, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | USES_CCODES |
+                 NEEDS_FIXUP, "b!1c", "!0t", 2),
+    ENCODING_MAP(kThumbBUncond,      0xe000,
+                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+                 "b", "!0t", 2),
+    ENCODING_MAP(kThumbBicRR,        0x4380,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "bics", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbBkpt,          0xbe00,
+                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+                 "bkpt", "!0d", 2),
+    ENCODING_MAP(kThumbBlx1,         0xf000,
+                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR |
+                 NEEDS_FIXUP, "blx_1", "!0u", 2),
+    ENCODING_MAP(kThumbBlx2,         0xe800,
+                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR |
+                 NEEDS_FIXUP, "blx_2", "!0v", 2),
+    ENCODING_MAP(kThumbBl1,          0xf000,
+                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+                 "bl_1", "!0u", 2),
+    ENCODING_MAP(kThumbBl2,          0xf800,
+                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+                 "bl_2", "!0v", 2),
+    ENCODING_MAP(kThumbBlxR,         0x4780,
+                 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_USE0 | IS_BRANCH | REG_DEF_LR,
+                 "blx", "!0C", 2),
+    ENCODING_MAP(kThumbBx,            0x4700,
+                 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+                 "bx", "!0C", 2),
+    ENCODING_MAP(kThumbCmnRR,        0x42c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+                 "cmn", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbCmpRI8,       0x2800,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | SETS_CCODES,
+                 "cmp", "!0C, #!1d", 2),
+    ENCODING_MAP(kThumbCmpRR,        0x4280,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+                 "cmp", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbCmpLH,        0x4540,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+                 "cmp", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbCmpHL,        0x4580,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+                 "cmp", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbCmpHH,        0x45c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+                 "cmp", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbEorRR,        0x4040,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "eors", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbLdmia,         0xc800,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
+                 "ldmia", "!0C!!, <!1R>", 2),
+    ENCODING_MAP(kThumbLdrRRI5,      0x6800,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldr", "!0C, [!1C, #!2E]", 2),
+    ENCODING_MAP(kThumbLdrRRR,       0x5800,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldr", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbLdrPcRel,    0x4800,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
+                 | IS_LOAD | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2),
+    ENCODING_MAP(kThumbLdrSpRel,    0x9800,
+                 kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
+                 | IS_LOAD, "ldr", "!0C, [sp, #!2E]", 2),
+    ENCODING_MAP(kThumbLdrbRRI5,     0x7800,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrb", "!0C, [!1C, #!2d]", 2),
+    ENCODING_MAP(kThumbLdrbRRR,      0x5c00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrb", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbLdrhRRI5,     0x8800,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrh", "!0C, [!1C, #!2F]", 2),
+    ENCODING_MAP(kThumbLdrhRRR,      0x5a00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrh", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbLdrsbRRR,     0x5600,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrsb", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbLdrshRRR,     0x5e00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrsh", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbLslRRI5,      0x0000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "lsls", "!0C, !1C, #!2d", 2),
+    ENCODING_MAP(kThumbLslRR,        0x4080,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "lsls", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbLsrRRI5,      0x0800,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "lsrs", "!0C, !1C, #!2d", 2),
+    ENCODING_MAP(kThumbLsrRR,        0x40c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "lsrs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbMovImm,       0x2000,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0 | SETS_CCODES,
+                 "movs", "!0C, #!1d", 2),
+    ENCODING_MAP(kThumbMovRR,        0x1c00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "movs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbMovRR_H2H,    0x46c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbMovRR_H2L,    0x4640,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbMovRR_L2H,    0x4680,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbMul,           0x4340,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "muls", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbMvn,           0x43c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "mvns", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbNeg,           0x4240,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "negs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbOrr,           0x4300,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "orrs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbPop,           0xbc00,
+                 kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
+                 | IS_LOAD, "pop", "<!0R>", 2),
+    ENCODING_MAP(kThumbPush,          0xb400,
+                 kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
+                 | IS_STORE, "push", "<!0R>", 2),
+    ENCODING_MAP(kThumbRorRR,        0x41c0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+                 "rors", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbSbc,           0x4180,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE01 | USES_CCODES | SETS_CCODES,
+                 "sbcs", "!0C, !1C", 2),
+    ENCODING_MAP(kThumbStmia,         0xc000,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0 | REG_USE0 | REG_USE_LIST1 | IS_STORE,
+                 "stmia", "!0C!!, <!1R>", 2),
+    ENCODING_MAP(kThumbStrRRI5,      0x6000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "str", "!0C, [!1C, #!2E]", 2),
+    ENCODING_MAP(kThumbStrRRR,       0x5000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
+                 "str", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbStrSpRel,    0x9000,
+                 kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
+                 | IS_STORE, "str", "!0C, [sp, #!2E]", 2),
+    ENCODING_MAP(kThumbStrbRRI5,     0x7000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "strb", "!0C, [!1C, #!2d]", 2),
+    ENCODING_MAP(kThumbStrbRRR,      0x5400,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
+                 "strb", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbStrhRRI5,     0x8000,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "strh", "!0C, [!1C, #!2F]", 2),
+    ENCODING_MAP(kThumbStrhRRR,      0x5200,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
+                 "strh", "!0C, [!1C, !2C]", 2),
+    ENCODING_MAP(kThumbSubRRI3,      0x1e00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "subs", "!0C, !1C, #!2d", 2),
+    ENCODING_MAP(kThumbSubRI8,       0x3800,
+                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
+                 "subs", "!0C, #!1d", 2),
+    ENCODING_MAP(kThumbSubRRR,       0x1a00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "subs", "!0C, !1C, !2C", 2),
+    ENCODING_MAP(kThumbSubSpI7,      0xb080,
+                 kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
+                 "sub", "sp, #!0d*4", 2),
+    ENCODING_MAP(kThumbSwi,           0xdf00,
+                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+                 "swi", "!0d", 2),
+    ENCODING_MAP(kThumbTst,           0x4200,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE01 | SETS_CCODES,
+                 "tst", "!0C, !1C", 2),
+    ENCODING_MAP(kThumb2Vldrs,       0xed900a00,
+                 kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+                 REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4),
+    ENCODING_MAP(kThumb2Vldrd,       0xed900b00,
+                 kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+                 REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4),
+    ENCODING_MAP(kThumb2Vmuls,        0xee200a00,
+                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vmuls", "!0s, !1s, !2s", 4),
+    ENCODING_MAP(kThumb2Vmuld,        0xee200b00,
+                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vmuld", "!0S, !1S, !2S", 4),
+    ENCODING_MAP(kThumb2Vstrs,       0xed800a00,
+                 kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "vstr", "!0s, [!1C, #!2E]", 4),
+    ENCODING_MAP(kThumb2Vstrd,       0xed800b00,
+                 kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "vstr", "!0S, [!1C, #!2E]", 4),
+    ENCODING_MAP(kThumb2Vsubs,        0xee300a40,
+                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vsub", "!0s, !1s, !2s", 4),
+    ENCODING_MAP(kThumb2Vsubd,        0xee300b40,
+                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vsub", "!0S, !1S, !2S", 4),
+    ENCODING_MAP(kThumb2Vadds,        0xee300a00,
+                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vadd", "!0s, !1s, !2s", 4),
+    ENCODING_MAP(kThumb2Vaddd,        0xee300b00,
+                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vadd", "!0S, !1S, !2S", 4),
+    ENCODING_MAP(kThumb2Vdivs,        0xee800a00,
+                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vdivs", "!0s, !1s, !2s", 4),
+    ENCODING_MAP(kThumb2Vdivd,        0xee800b00,
+                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "vdivd", "!0S, !1S, !2S", 4),
+    ENCODING_MAP(kThumb2VcvtIF,       0xeeb80ac0,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vcvt.f32", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2VcvtID,       0xeeb80bc0,
+                 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vcvt.f64", "!0S, !1s", 4),
+    ENCODING_MAP(kThumb2VcvtFI,       0xeebd0ac0,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vcvt.s32.f32 ", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2VcvtDI,       0xeebd0bc0,
+                 kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vcvt.s32.f64 ", "!0s, !1S", 4),
+    ENCODING_MAP(kThumb2VcvtFd,       0xeeb70ac0,
+                 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vcvt.f64.f32 ", "!0S, !1s", 4),
+    ENCODING_MAP(kThumb2VcvtDF,       0xeeb70bc0,
+                 kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vcvt.f32.f64 ", "!0s, !1S", 4),
+    ENCODING_MAP(kThumb2Vsqrts,       0xeeb10ac0,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vsqrt.f32 ", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2Vsqrtd,       0xeeb10bc0,
+                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vsqrt.f64 ", "!0S, !1S", 4),
+    ENCODING_MAP(kThumb2MovImmShift, 0xf04f0000, /* no setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "mov", "!0C, #!1m", 4),
+    ENCODING_MAP(kThumb2MovImm16,       0xf2400000,
+                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "mov", "!0C, #!1M", 4),
+    ENCODING_MAP(kThumb2StrRRI12,       0xf8c00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "str", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2LdrRRI12,       0xf8d00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldr", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2StrRRI8Predec,       0xf8400c00,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "str", "!0C, [!1C, #-!2d]", 4),
+    ENCODING_MAP(kThumb2LdrRRI8Predec,       0xf8500c00,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldr", "!0C, [!1C, #-!2d]", 4),
+    ENCODING_MAP(kThumb2Cbnz,       0xb900, /* Note: does not affect flags */
+                 kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH |
+                 NEEDS_FIXUP, "cbnz", "!0C,!1t", 2),
+    ENCODING_MAP(kThumb2Cbz,       0xb100, /* Note: does not affect flags */
+                 kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH |
+                 NEEDS_FIXUP, "cbz", "!0C,!1t", 2),
+    ENCODING_MAP(kThumb2AddRRI12,       0xf2000000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1,/* Note: doesn't affect flags */
+                 "add", "!0C,!1C,#!2d", 4),
+    ENCODING_MAP(kThumb2MovRR,       0xea4f0000, /* no setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov", "!0C, !1C", 4),
+    ENCODING_MAP(kThumb2Vmovs,       0xeeb00a40,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vmov.f32 ", " !0s, !1s", 4),
+    ENCODING_MAP(kThumb2Vmovd,       0xeeb00b40,
+                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vmov.f64 ", " !0S, !1S", 4),
+    ENCODING_MAP(kThumb2Ldmia,         0xe8900000,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
+                 "ldmia", "!0C!!, <!1R>", 4),
+    ENCODING_MAP(kThumb2Stmia,         0xe8800000,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE0 | REG_USE_LIST1 | IS_STORE,
+                 "stmia", "!0C!!, <!1R>", 4),
+    ENCODING_MAP(kThumb2AddRRR,  0xeb100000, /* setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1,
+                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "adds", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2SubRRR,       0xebb00000, /* setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1,
+                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "subs", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2SbcRRR,       0xeb700000, /* setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1,
+                 IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES | SETS_CCODES,
+                 "sbcs", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2CmpRR,       0xebb00f00,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
+                 "cmp", "!0C, !1C", 4),
+    ENCODING_MAP(kThumb2SubRRI12,       0xf2a00000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1,/* Note: doesn't affect flags */
+                 "sub", "!0C,!1C,#!2d", 4),
+    ENCODING_MAP(kThumb2MvnImm12,  0xf06f0000, /* no setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtImm12, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "mvn", "!0C, #!1n", 4),
+    ENCODING_MAP(kThumb2Sel,       0xfaa0f080,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
+                 "sel", "!0C, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2Ubfx,       0xf3c00000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
+                 kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
+                 "ubfx", "!0C, !1C, #!2d, #!3d", 4),
+    ENCODING_MAP(kThumb2Sbfx,       0xf3400000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
+                 kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
+                 "sbfx", "!0C, !1C, #!2d, #!3d", 4),
+    ENCODING_MAP(kThumb2LdrRRR,    0xf8500000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldr", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2LdrhRRR,    0xf8300000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2LdrshRRR,    0xf9300000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2LdrbRRR,    0xf8100000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2LdrsbRRR,    0xf9100000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 "ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2StrRRR,    0xf8400000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 "str", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2StrhRRR,    0xf8200000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 "strh", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2StrbRRR,    0xf8000000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 "strb", "!0C, [!1C, !2C, LSL #!3d]", 4),
+    ENCODING_MAP(kThumb2LdrhRRI12,       0xf8b00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrh", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2LdrshRRI12,       0xf9b00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrsh", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2LdrbRRI12,       0xf8900000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrb", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2LdrsbRRI12,       0xf9900000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrsb", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2StrhRRI12,       0xf8a00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "strh", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2StrbRRI12,       0xf8800000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 "strb", "!0C, [!1C, #!2d]", 4),
+    ENCODING_MAP(kThumb2Pop,           0xe8bd0000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
+                 | IS_LOAD | NEEDS_FIXUP, "pop", "<!0R>", 4),
+    ENCODING_MAP(kThumb2Push,          0xe92d0000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
+                 | IS_STORE | NEEDS_FIXUP, "push", "<!0R>", 4),
+    ENCODING_MAP(kThumb2CmpRI12, 0xf1b00f00,
+                 kFmtBitBlt, 19, 16, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_USE0 | SETS_CCODES,
+                 "cmp", "!0C, #!1m", 4),
+    ENCODING_MAP(kThumb2AdcRRR,  0xeb500000, /* setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1,
+                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "adcs", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2AndRRR,  0xea000000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+                 "and", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2BicRRR,  0xea200000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+                 "bic", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2CmnRR,  0xeb000000,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "cmn", "!0C, !1C, shift !2d", 4),
+    ENCODING_MAP(kThumb2EorRRR,  0xea800000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+                 "eor", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2MulRRR,  0xfb00f000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul", "!0C, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2MnvRR,  0xea6f0000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "mvn", "!0C, !1C, shift !2d", 4),
+    ENCODING_MAP(kThumb2RsubRRI8,       0xf1d00000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "rsb", "!0C,!1C,#!2m", 4),
+    ENCODING_MAP(kThumb2NegRR,       0xf1d00000, /* instance of rsub */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "neg", "!0C,!1C", 4),
+    ENCODING_MAP(kThumb2OrrRRR,  0xea400000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+                 "orr", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2TstRR,       0xea100f00,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
+                 "tst", "!0C, !1C, shift !2d", 4),
+    ENCODING_MAP(kThumb2LslRRR,  0xfa00f000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "lsl", "!0C, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2LsrRRR,  0xfa20f000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "lsr", "!0C, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2AsrRRR,  0xfa40f000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "asr", "!0C, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2RorRRR,  0xfa60f000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "ror", "!0C, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2LslRRI5,  0xea4f0000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "lsl", "!0C, !1C, #!2d", 4),
+    ENCODING_MAP(kThumb2LsrRRI5,  0xea4f0010,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "lsr", "!0C, !1C, #!2d", 4),
+    ENCODING_MAP(kThumb2AsrRRI5,  0xea4f0020,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "asr", "!0C, !1C, #!2d", 4),
+    ENCODING_MAP(kThumb2RorRRI5,  0xea4f0030,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "ror", "!0C, !1C, #!2d", 4),
+    ENCODING_MAP(kThumb2BicRRI8,  0xf0200000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "bic", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2AndRRI8,  0xf0000000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "and", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2OrrRRI8,  0xf0400000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "orr", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2EorRRI8,  0xf0800000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "eor", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2AddRRI8,  0xf1100000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "adds", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2AdcRRI8,  0xf1500000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
+                 "adcs", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2SubRRI8,  0xf1b00000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "subs", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2SbcRRI8,  0xf1700000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
+                 "sbcs", "!0C, !1C, #!2m", 4),
+    ENCODING_MAP(kThumb2It,  0xbf00,
+                 kFmtBitBlt, 7, 4, kFmtBitBlt, 3, 0, kFmtModImm, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_IT | USES_CCODES,
+                 "it:!1b", "!0c", 2),
+    ENCODING_MAP(kThumb2Fmstat,  0xeef1fa10,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES,
+                 "fmstat", "", 4),
+    ENCODING_MAP(kThumb2Vcmpd,        0xeeb40b40,
+                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+                 "vcmp.f64", "!0S, !1S", 4),
+    ENCODING_MAP(kThumb2Vcmps,        0xeeb40a40,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+                 "vcmp.f32", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2LdrPcRel12,       0xf8df0000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+                 "ldr", "!0C, [r15pc, #!1d]", 4),
+    ENCODING_MAP(kThumb2BCond,        0xf0008000,
+                 kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | IS_BRANCH | USES_CCODES | NEEDS_FIXUP,
+                 "b!1c", "!0t", 4),
+    ENCODING_MAP(kThumb2Vmovd_RR,       0xeeb00b40,
+                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vmov.f64", "!0S, !1S", 4),
+    ENCODING_MAP(kThumb2Vmovs_RR,       0xeeb00a40,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vmov.f32", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2Fmrs,       0xee100a10,
+                 kFmtBitBlt, 15, 12, kFmtSfp, 7, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "fmrs", "!0C, !1s", 4),
+    ENCODING_MAP(kThumb2Fmsr,       0xee000a10,
+                 kFmtSfp, 7, 16, kFmtBitBlt, 15, 12, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "fmsr", "!0s, !1C", 4),
+    ENCODING_MAP(kThumb2Fmrrd,       0xec500b10,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtDfp, 5, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2,
+                 "fmrrd", "!0C, !1C, !2S", 4),
+    ENCODING_MAP(kThumb2Fmdrr,       0xec400b10,
+                 kFmtDfp, 5, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "fmdrr", "!0S, !1C, !2C", 4),
+    ENCODING_MAP(kThumb2Vabsd,       0xeeb00bc0,
+                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vabs.f64", "!0S, !1S", 4),
+    ENCODING_MAP(kThumb2Vabss,       0xeeb00ac0,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vabs.f32", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2Vnegd,       0xeeb10b40,
+                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vneg.f64", "!0S, !1S", 4),
+    ENCODING_MAP(kThumb2Vnegs,       0xeeb10a40,
+                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "vneg.f32", "!0s, !1s", 4),
+    ENCODING_MAP(kThumb2Vmovs_IMM8,       0xeeb00a00,
+                 kFmtSfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "vmov.f32", "!0s, #0x!1h", 4),
+    ENCODING_MAP(kThumb2Vmovd_IMM8,       0xeeb00b00,
+                 kFmtDfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "vmov.f64", "!0S, #0x!1h", 4),
+    ENCODING_MAP(kThumb2Mla,  0xfb000000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtBitBlt, 15, 12,
+                 IS_QUAD_OP | REG_DEF0 | REG_USE1 | REG_USE2 | REG_USE3,
+                 "mla", "!0C, !1C, !2C, !3C", 4),
+    ENCODING_MAP(kThumb2Umull,  0xfba00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+                 kFmtBitBlt, 3, 0,
+                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
+                 "umull", "!0C, !1C, !2C, !3C", 4),
+    ENCODING_MAP(kThumb2Ldrex,       0xe8500f00,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 "ldrex", "!0C, [!1C, #!2E]", 4),
+    ENCODING_MAP(kThumb2Strex,       0xe8400000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
+                 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
+                 "strex", "!0C,!1C, [!2C, #!2E]", 4),
+    ENCODING_MAP(kThumb2Clrex,       0xf3bf8f2f,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "clrex", "", 4),
+    ENCODING_MAP(kThumb2Bfi,         0xf3600000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtShift5, -1, -1,
+                 kFmtBitBlt, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
+                 "bfi", "!0C,!1C,#!2d,#!3d", 4),
+    ENCODING_MAP(kThumb2Bfc,         0xf36f0000,
+                 kFmtBitBlt, 11, 8, kFmtShift5, -1, -1, kFmtBitBlt, 4, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0,
+                 "bfc", "!0C,#!1d,#!2d", 4),
+    ENCODING_MAP(kThumb2Dmb,         0xf3bf8f50,
+                 kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "dmb","#!0B",4),
+    ENCODING_MAP(kThumb2LdrPcReln12,       0xf85f0000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+                 "ldr", "!0C, [r15pc, -#!1d]", 4),
+    ENCODING_MAP(kThumb2Stm,          0xe9000000,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 12, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_USE0 | REG_USE_LIST1 | IS_STORE,
+                 "stm", "!0C, <!1R>", 4),
+    ENCODING_MAP(kThumbUndefined,       0xde00,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "undefined", "", 2),
+    // NOTE: vpop, vpush hard-encoded for s16+ reg list
+    ENCODING_MAP(kThumb2VPopCS,       0xecbd8a00,
+                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_FPCS_LIST0
+                 | IS_LOAD, "vpop", "<!0P>", 4),
+    ENCODING_MAP(kThumb2VPushCS,      0xed2d8a00,
+                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_FPCS_LIST0
+                 | IS_STORE, "vpush", "<!0P>", 4),
+    ENCODING_MAP(kThumb2Vldms,        0xec900a00,
+                 kFmtBitBlt, 19, 16, kFmtSfp, 22, 12, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_USE0 | REG_DEF_FPCS_LIST2
+                 | IS_LOAD, "vldms", "!0C, <!2Q>", 4),
+    ENCODING_MAP(kThumb2Vstms,        0xec800a00,
+                 kFmtBitBlt, 19, 16, kFmtSfp, 22, 12, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_USE0 | REG_USE_FPCS_LIST2
+                 | IS_STORE, "vstms", "!0C, <!2Q>", 4),
+    ENCODING_MAP(kThumb2BUncond,      0xf0009000,
+                 kFmtOff24, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH,
+                 "b", "!0t", 4),
+    ENCODING_MAP(kThumb2MovImm16H,       0xf2c00000,
+                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | REG_USE0,
+                 "movt", "!0C, #!1M", 4),
+    ENCODING_MAP(kThumb2AddPCR,      0x4487,
+                 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_USE0 | IS_BRANCH,
+                 "add", "rPC, !0C", 2),
+    ENCODING_MAP(kThumb2Adr,         0xf20f0000,
+                 kFmtBitBlt, 11, 8, kFmtImm12, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 /* Note: doesn't affect flags */
+                 IS_TERTIARY_OP | REG_DEF0 | NEEDS_FIXUP,
+                 "adr", "!0C,#!1d", 4),
+    ENCODING_MAP(kThumb2MovImm16LST,     0xf2400000,
+                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | NEEDS_FIXUP,
+                 "mov", "!0C, #!1M", 4),
+    ENCODING_MAP(kThumb2MovImm16HST,     0xf2c00000,
+                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | REG_USE0 | NEEDS_FIXUP,
+                 "movt", "!0C, #!1M", 4),
+    ENCODING_MAP(kThumb2LdmiaWB,         0xe8b00000,
+                 kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
+                 "ldmia", "!0C!!, <!1R>", 4),
+    ENCODING_MAP(kThumb2SubsRRI12,       0xf1b00000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 "subs", "!0C,!1C,#!2d", 4),
+    ENCODING_MAP(kThumb2OrrRRRs,  0xea500000,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "orrs", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2Push1,    0xf84d0d04,
+                 kFmtBitBlt, 15, 12, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE0
+                 | IS_STORE, "push1", "!0C", 4),
+    ENCODING_MAP(kThumb2Pop1,    0xf85d0b04,
+                 kFmtBitBlt, 15, 12, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF0
+                 | IS_LOAD, "pop1", "!0C", 4),
+    ENCODING_MAP(kThumb2RsubRRR,  0xebd00000, /* setflags encoding */
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtShift, -1, -1,
+                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 "rsbs", "!0C, !1C, !2C!3H", 4),
+    ENCODING_MAP(kThumb2Smull,  0xfb800000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+                 kFmtBitBlt, 3, 0,
+                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
+                 "smull", "!0C, !1C, !2C, !3C", 4),
+    ENCODING_MAP(kThumb2LdrdPcRel8,  0xe9df0000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+                 "ldrd", "!0C, !1C, [pc, #!2E]", 4),
+    ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+                 kFmtBitBlt, 7, 0,
+                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD,
+                 "ldrd", "!0C, !1C, [!2C, #!3E]", 4),
+    ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
+                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+                 kFmtBitBlt, 7, 0,
+                 IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE,
+                 "strd", "!0C, !1C, [!2C, #!3E]", 4),
+};
+
+/*
+ * The fake NOP of moving r0 to r0 will actually incur data stalls if r0 is
+ * not ready. Since r5FP is not updated often, it is less likely to
+ * generate unnecessary stall cycles.
+ * TUNING: No longer true - find new NOP pattern.
+ */
+#define PADDING_MOV_R5_R5               0x1C2D
+
+/*
+ * Assemble the LIR into binary instruction format.  Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.
+ */
+AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr)
+{
+  LIR* lir;
+  AssemblerStatus res = kSuccess;  // Assume success
+
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+
+    if (lir->opcode < 0) {
+      /* 1 means padding is needed */
+      if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
+        code_buffer_.push_back(PADDING_MOV_R5_R5 & 0xFF);
+        code_buffer_.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
+      }
+      continue;
+    }
+
+    if (lir->flags.is_nop) {
+      continue;
+    }
+
+    /*
+     * For PC-relative displacements we won't know if the
+     * selected instruction will work until late (i.e. - now).
+     * If something doesn't fit, we must replace the short-form
+     * operation with a longer-form one.  Note, though, that this
+     * can change code we've already processed, so we'll need to
+     * re-calculate offsets and restart.  To limit the number of
+     * restarts, the entire list will be scanned and patched.
+     * Of course, the patching itself may cause new overflows so this
+     * is an iterative process.
+     */
+    if (lir->flags.pcRelFixup) {
+      if (lir->opcode == kThumbLdrPcRel ||
+          lir->opcode == kThumb2LdrPcRel12 ||
+          lir->opcode == kThumbAddPcRel ||
+          lir->opcode == kThumb2LdrdPcRel8 ||
+          ((lir->opcode == kThumb2Vldrd) && (lir->operands[1] == r15pc)) ||
+          ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == r15pc))) {
+        /*
+         * PC-relative loads are mostly used to load immediates
+         * that are too large to materialize directly in one shot.
+         * However, if the load displacement exceeds the limit,
+         * we revert to a multiple-instruction materialization sequence.
+         */
+        LIR *lir_target = lir->target;
+        uintptr_t pc = (lir->offset + 4) & ~3;
+        uintptr_t target = lir_target->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
+        // First, a sanity check for cases we shouldn't see now
+        if (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
+            ((lir->opcode == kThumbLdrPcRel) && (delta > 1020))) {
+          // Shouldn't happen in current codegen.
+          LOG(FATAL) << "Unexpected pc-rel offset " << delta;
+        }
+        // Now, check for the difficult cases
+        if (((lir->opcode == kThumb2LdrPcRel12) && (delta > 4091)) ||
+            ((lir->opcode == kThumb2LdrdPcRel8) && (delta > 1020)) ||
+            ((lir->opcode == kThumb2Vldrs) && (delta > 1020)) ||
+            ((lir->opcode == kThumb2Vldrd) && (delta > 1020))) {
+          /*
+           * Note: because rARM_LR may be used to fix up out-of-range
+           * vldrs/vldrd we include REG_DEF_LR in the resource
+           * masks for these instructions.
+           */
+          int base_reg = ((lir->opcode == kThumb2LdrdPcRel8) || (lir->opcode == kThumb2LdrPcRel12))
+              ?  lir->operands[0] : rARM_LR;
+
+          // Add new Adr to generate the address.
+          LIR* new_adr = RawLIR(lir->dalvik_offset, kThumb2Adr,
+                     base_reg, 0, 0, 0, 0, lir->target);
+          InsertLIRBefore(lir, new_adr);
+
+          // Convert to normal load.
+          if (lir->opcode == kThumb2LdrPcRel12) {
+            lir->opcode = kThumb2LdrRRI12;
+          } else if (lir->opcode == kThumb2LdrdPcRel8) {
+            lir->opcode = kThumb2LdrdI8;
+          }
+          // Change the load to be relative to the new Adr base.
+          if (lir->opcode == kThumb2LdrdI8) {
+            lir->operands[3] = 0;
+            lir->operands[2] = base_reg;
+          } else {
+            lir->operands[2] = 0;
+            lir->operands[1] = base_reg;
+          }
+          SetupResourceMasks(lir);
+          res = kRetryAll;
+        } else {
+          if ((lir->opcode == kThumb2Vldrs) ||
+              (lir->opcode == kThumb2Vldrd) ||
+              (lir->opcode == kThumb2LdrdPcRel8)) {
+            lir->operands[2] = delta >> 2;
+          } else {
+            lir->operands[1] = (lir->opcode == kThumb2LdrPcRel12) ?  delta :
+                delta >> 2;
+          }
+        }
+      } else if (lir->opcode == kThumb2Cbnz || lir->opcode == kThumb2Cbz) {
+        LIR *target_lir = lir->target;
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        int delta = target - pc;
+        if (delta > 126 || delta < 0) {
+          /*
+           * Convert to cmp rx,#0 / b[eq/ne] tgt pair
+           * Make new branch instruction and insert after
+           */
+          LIR* new_inst =
+            RawLIR(lir->dalvik_offset, kThumbBCond, 0,
+                   (lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
+                   0, 0, 0, lir->target);
+          InsertLIRAfter(lir, new_inst);
+          /* Convert the cb[n]z to a cmp rx, #0 */
+          lir->opcode = kThumbCmpRI8;
+          /* operand[0] is src1 in both cb[n]z & CmpRI8 */
+          lir->operands[1] = 0;
+          lir->target = 0;
+          SetupResourceMasks(lir);
+          res = kRetryAll;
+        } else {
+          lir->operands[1] = delta >> 1;
+        }
+      } else if (lir->opcode == kThumb2Push || lir->opcode == kThumb2Pop) {
+        if (__builtin_popcount(lir->operands[0]) == 1) {
+          /*
+           * The standard push/pop multiple instruction
+           * requires at least two registers in the list.
+           * If we've got just one, switch to the single-reg
+           * encoding.
+           */
+          lir->opcode = (lir->opcode == kThumb2Push) ? kThumb2Push1 :
+              kThumb2Pop1;
+          int reg = 0;
+          while (lir->operands[0]) {
+            if (lir->operands[0] & 0x1) {
+              break;
+            } else {
+              reg++;
+              lir->operands[0] >>= 1;
+            }
+          }
+          lir->operands[0] = reg;
+          SetupResourceMasks(lir);
+          res = kRetryAll;
+        }
+      } else if (lir->opcode == kThumbBCond || lir->opcode == kThumb2BCond) {
+        LIR *target_lir = lir->target;
+        int delta = 0;
+        DCHECK(target_lir);
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        delta = target - pc;
+        if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
+          lir->opcode = kThumb2BCond;
+          SetupResourceMasks(lir);
+          res = kRetryAll;
+        }
+        lir->operands[0] = delta >> 1;
+      } else if (lir->opcode == kThumb2BUncond) {
+        LIR *target_lir = lir->target;
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        int delta = target - pc;
+        lir->operands[0] = delta >> 1;
+        if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
+          lir->operands[0] == 0) {  // Useless branch
+          lir->flags.is_nop = true;
+          res = kRetryAll;
+        }
+      } else if (lir->opcode == kThumbBUncond) {
+        LIR *target_lir = lir->target;
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        int delta = target - pc;
+        if (delta > 2046 || delta < -2048) {
+          // Convert to Thumb2BCond w/ kArmCondAl
+          lir->opcode = kThumb2BUncond;
+          lir->operands[0] = 0;
+          SetupResourceMasks(lir);
+          res = kRetryAll;
+        } else {
+          lir->operands[0] = delta >> 1;
+          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
+            lir->operands[0] == -1) {  // Useless branch
+            lir->flags.is_nop = true;
+            res = kRetryAll;
+          }
+        }
+      } else if (lir->opcode == kThumbBlx1) {
+        DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
+        /* cur_pc is Thumb */
+        uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
+        uintptr_t target = lir->operands[1];
+
+        /* Match bit[1] in target with base */
+        if (cur_pc & 0x2) {
+          target |= 0x2;
+        }
+        int delta = target - cur_pc;
+        DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
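+        /* Split offset bits 22..12 into blx1 and bits 11..1 into blx2 */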
+        lir->operands[0] = (delta >> 12) & 0x7ff;
+        NEXT_LIR(lir)->operands[0] = (delta >> 1) & 0x7ff;
+      } else if (lir->opcode == kThumbBl1) {
+        DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
+        /* Both cur_pc and target are Thumb */
+        uintptr_t cur_pc = start_addr + lir->offset + 4;
+        uintptr_t target = lir->operands[1];
+
+        int delta = target - cur_pc;
+        DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
+        lir->operands[0] = (delta >> 12) & 0x7ff;
+        NEXT_LIR(lir)->operands[0] = (delta >> 1) & 0x7ff;
+      } else if (lir->opcode == kThumb2Adr) {
+        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+        LIR* target = lir->target;
+        int target_disp = tab_rec ? tab_rec->offset
+                    : target->offset;
+        int disp = target_disp - ((lir->offset + 4) & ~3);
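+        // adr's imm12 reaches at most 4095 bytes forward; beyond that, materialize
+        // the address with the movw/movt pair plus an add of PC (handled below).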
+        if (disp < 4096) {
+          lir->operands[1] = disp;
+        } else {
+          // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
+          // TUNING: if this case fires often, it can be improved.  Not expected to be common.
+          LIR *new_mov16L =
+              RawLIR(lir->dalvik_offset, kThumb2MovImm16LST,
+                     lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
+                     reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+          InsertLIRBefore(lir, new_mov16L);
+          LIR *new_mov16H =
+              RawLIR(lir->dalvik_offset, kThumb2MovImm16HST,
+                     lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
+                     reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+          InsertLIRBefore(lir, new_mov16H);
+          if (ARM_LOWREG(lir->operands[0])) {
+            lir->opcode = kThumbAddRRLH;
+          } else {
+            lir->opcode = kThumbAddRRHH;
+          }
+          lir->operands[1] = rARM_PC;
+          SetupResourceMasks(lir);
+          res = kRetryAll;
+        }
+      } else if (lir->opcode == kThumb2MovImm16LST) {
+        // operands[1] should hold disp, [2] has add, [3] has tab_rec
+        LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
+        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        // If tab_rec is null, this is a literal load. Use target
+        LIR* target = lir->target;
+        int target_disp = tab_rec ? tab_rec->offset : target->offset;
+        lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
+      } else if (lir->opcode == kThumb2MovImm16HST) {
+        // operands[1] should hold disp, [2] has add, [3] has tab_rec
+        LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
+        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        // If tab_rec is null, this is a literal load. Use target
+        LIR* target = lir->target;
+        int target_disp = tab_rec ? tab_rec->offset : target->offset;
+        lir->operands[1] =
+            ((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
+      }
+    }
+    /*
+     * If one of the pc-relative instructions expanded we'll have
+     * to make another pass.  Don't bother to fully assemble the
+     * instruction.
+     */
+    if (res != kSuccess) {
+      continue;
+    }
+    const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
+    uint32_t bits = encoder->skeleton;
+    int i;
+    for (i = 0; i < 4; i++) {
+      uint32_t operand;
+      uint32_t value;
+      operand = lir->operands[i];
+      switch (encoder->field_loc[i].kind) {
+        case kFmtUnused:
+          break;
+        case kFmtFPImm:
+          value = ((operand & 0xF0) >> 4) << encoder->field_loc[i].end;
+          value |= (operand & 0x0F) << encoder->field_loc[i].start;
+          bits |= value;
+          break;
+        case kFmtBrOffset:
+          value = ((operand  & 0x80000) >> 19) << 26;
+          value |= ((operand & 0x40000) >> 18) << 11;
+          value |= ((operand & 0x20000) >> 17) << 13;
+          value |= ((operand & 0x1f800) >> 11) << 16;
+          value |= (operand  & 0x007ff);
+          bits |= value;
+          break;
+        case kFmtShift5:
+          value = ((operand & 0x1c) >> 2) << 12;
+          value |= (operand & 0x03) << 6;
+          bits |= value;
+          break;
+        case kFmtShift:
+          value = ((operand & 0x70) >> 4) << 12;
+          value |= (operand & 0x0f) << 4;
+          bits |= value;
+          break;
+        case kFmtBWidth:
+          value = operand - 1;
+          bits |= value;
+          break;
+        case kFmtLsb:
+          value = ((operand & 0x1c) >> 2) << 12;
+          value |= (operand & 0x03) << 6;
+          bits |= value;
+          break;
+        case kFmtImm6:
+          value = ((operand & 0x20) >> 5) << 9;
+          value |= (operand & 0x1f) << 3;
+          bits |= value;
+          break;
+        case kFmtBitBlt:
+          value = (operand << encoder->field_loc[i].start) &
+              ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          bits |= value;
+          break;
+        case kFmtDfp: {
+          DCHECK(ARM_DOUBLEREG(operand));
+          DCHECK_EQ((operand & 0x1), 0U);
+          int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+          /* Snag the 1-bit slice and position it */
+          value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
+          /* Extract and position the 4-bit slice */
+          value |= (reg_name & 0x0f) << encoder->field_loc[i].start;
+          bits |= value;
+          break;
+        }
+        case kFmtSfp:
+          DCHECK(ARM_SINGLEREG(operand));
+          /* Snag the 1-bit slice and position it */
+          value = (operand & 0x1) << encoder->field_loc[i].end;
+          /* Extract and position the 4-bit slice */
+          value |= ((operand & 0x1e) >> 1) << encoder->field_loc[i].start;
+          bits |= value;
+          break;
+        case kFmtImm12:
+        case kFmtModImm:
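+          // Split the value into i:imm3:imm8 and place them at bit 26, bits 14..12 and bits 7..0.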
+          value = ((operand & 0x800) >> 11) << 26;
+          value |= ((operand & 0x700) >> 8) << 12;
+          value |= operand & 0x0ff;
+          bits |= value;
+          break;
+        case kFmtImm16:
+          value = ((operand & 0x0800) >> 11) << 26;
+          value |= ((operand & 0xf000) >> 12) << 16;
+          value |= ((operand & 0x0700) >> 8) << 12;
+          value |= operand & 0x0ff;
+          bits |= value;
+          break;
+        case kFmtOff24: {
+          uint32_t signbit = (operand >> 31) & 0x1;
+          uint32_t i1 = (operand >> 22) & 0x1;
+          uint32_t i2 = (operand >> 21) & 0x1;
+          uint32_t imm10 = (operand >> 11) & 0x03ff;
+          uint32_t imm11 = operand & 0x07ff;
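+          // B.W (T4) encoding: J1 = NOT(I1 EOR S), J2 = NOT(I2 EOR S).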
+          uint32_t j1 = (i1 ^ signbit) ? 0 : 1;
+          uint32_t j2 = (i2 ^ signbit) ? 0 : 1;
+          value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) |
+              imm11;
+          bits |= value;
+          }
+          break;
+        default:
+          LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind;
+      }
+    }
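+    // Emit little-endian halfwords; 32-bit encodings store the high halfword first.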
+    if (encoder->size == 4) {
+      code_buffer_.push_back((bits >> 16) & 0xff);
+      code_buffer_.push_back((bits >> 24) & 0xff);
+    }
+    code_buffer_.push_back(bits & 0xff);
+    code_buffer_.push_back((bits >> 8) & 0xff);
+  }
+  return res;
+}
+
+int ArmMir2Lir::GetInsnSize(LIR* lir)
+{
+  return EncodingMap[lir->opcode].size;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
new file mode 100644
index 0000000..a6720ce
--- /dev/null
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Thumb2 ISA. */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+
+/* Return the position of an ssa name within the argument list */
+int ArmMir2Lir::InPosition(int s_reg)
+{
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  return v_reg - cu_->num_regs;
+}
+
+/*
+ * Describe an argument.  If it's already in an arg register, just leave it
+ * there.  NOTE: all live arg registers must be locked prior to this call
+ * to avoid having them allocated as a temp by downstream utilities.
+ */
+RegLocation ArmMir2Lir::ArgLoc(RegLocation loc)
+{
+  int arg_num = InPosition(loc.s_reg_low);
+  if (loc.wide) {
+    if (arg_num == 2) {
+      // Bad case - half in register, half in frame.  Just punt
+      loc.location = kLocInvalid;
+    } else if (arg_num < 2) {
+      loc.low_reg = rARM_ARG1 + arg_num;
+      loc.high_reg = loc.low_reg + 1;
+      loc.location = kLocPhysReg;
+    } else {
+      loc.location = kLocDalvikFrame;
+    }
+  } else {
+    if (arg_num < 3) {
+      loc.low_reg = rARM_ARG1 + arg_num;
+      loc.location = kLocPhysReg;
+    } else {
+      loc.location = kLocDalvikFrame;
+    }
+  }
+  return loc;
+}
+
+/*
+ * Load an argument.  If already in a register, just return.  If in
+ * the frame, we can't use the normal LoadValue() because it assumes
+ * a proper frame - and we're frameless.
+ */
+RegLocation ArmMir2Lir::LoadArg(RegLocation loc)
+{
+  if (loc.location == kLocDalvikFrame) {
+    int start = (InPosition(loc.s_reg_low) + 1) * sizeof(uint32_t);
+    loc.low_reg = AllocTemp();
+    LoadWordDisp(rARM_SP, start, loc.low_reg);
+    if (loc.wide) {
+      loc.high_reg = AllocTemp();
+      LoadWordDisp(rARM_SP, start + sizeof(uint32_t), loc.high_reg);
+    }
+    loc.location = kLocPhysReg;
+  }
+  return loc;
+}
+
+/* Lock any referenced arguments that arrive in registers */
+void ArmMir2Lir::LockLiveArgs(MIR* mir)
+{
+  int first_in = cu_->num_regs;
+  const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
+  for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+    int v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[i]);
+    int InPosition = v_reg - first_in;
+    if (InPosition < num_arg_regs) {
+      LockTemp(rARM_ARG1 + InPosition);
+    }
+  }
+}
+
+/* Find the next MIR, which may be in a following basic block */
+// TODO: should this be a utility in mir_graph?
+MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir)
+{
+  BasicBlock* bb = *p_bb;
+  MIR* orig_mir = mir;
+  while (bb != NULL) {
+    if (mir != NULL) {
+      mir = mir->next;
+    }
+    if (mir != NULL) {
+      return mir;
+    } else {
+      bb = bb->fall_through;
+      *p_bb = bb;
+      if (bb) {
+         mir = bb->first_mir_insn;
+         if (mir != NULL) {
+           return mir;
+         }
+      }
+    }
+  }
+  return orig_mir;
+}
+
+/* Used for the "verbose" listing */
+// TODO: move to common code
+void ArmMir2Lir::GenPrintLabel(MIR* mir)
+{
+  /* Mark the beginning of a Dalvik instruction for line tracking */
+  char* inst_str = cu_->verbose ?
+     mir_graph_->GetDalvikDisassembly(mir) : NULL;
+  MarkBoundary(mir->offset, inst_str);
+}
+
+MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
+                             OpSize size, bool long_or_double, bool is_object)
+{
+  int field_offset;
+  bool is_volatile;
+  uint32_t field_idx = mir->dalvikInsn.vC;
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
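+  // Bail to the normal path unless the field access is fast and the null check has been elided.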
+  if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+    return NULL;
+  }
+  RegLocation rl_obj = mir_graph_->GetSrc(mir, 0);
+  LockLiveArgs(mir);
+  rl_obj = ArmMir2Lir::ArgLoc(rl_obj);
+  RegLocation rl_dest;
+  if (long_or_double) {
+    rl_dest = GetReturnWide(false);
+  } else {
+    rl_dest = GetReturn(false);
+  }
+  // Point of no return - no aborts after this
+  ArmMir2Lir::GenPrintLabel(mir);
+  rl_obj = LoadArg(rl_obj);
+  GenIGet(field_idx, mir->optimization_flags, size, rl_dest, rl_obj, long_or_double, is_object);
+  return GetNextMir(bb, mir);
+}
+
+MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
+                             OpSize size, bool long_or_double, bool is_object)
+{
+  int field_offset;
+  bool is_volatile;
+  uint32_t field_idx = mir->dalvikInsn.vC;
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+  if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+    return NULL;
+  }
+  RegLocation rl_src;
+  RegLocation rl_obj;
+  LockLiveArgs(mir);
+  if (long_or_double) {
+    rl_src = mir_graph_->GetSrcWide(mir, 0);
+    rl_obj = mir_graph_->GetSrc(mir, 2);
+  } else {
+    rl_src = mir_graph_->GetSrc(mir, 0);
+    rl_obj = mir_graph_->GetSrc(mir, 1);
+  }
+  rl_src = ArmMir2Lir::ArgLoc(rl_src);
+  rl_obj = ArmMir2Lir::ArgLoc(rl_obj);
+  // Reject if source is split across registers & frame
+  if (rl_obj.location == kLocInvalid) {
+    ResetRegPool();
+    return NULL;
+  }
+  // Point of no return - no aborts after this
+  ArmMir2Lir::GenPrintLabel(mir);
+  rl_obj = LoadArg(rl_obj);
+  rl_src = LoadArg(rl_src);
+  GenIPut(field_idx, mir->optimization_flags, size, rl_src, rl_obj, long_or_double, is_object);
+  return GetNextMir(bb, mir);
+}
+
+MIR* ArmMir2Lir::SpecialIdentity(MIR* mir)
+{
+  RegLocation rl_src;
+  RegLocation rl_dest;
+  bool wide = (mir->ssa_rep->num_uses == 2);
+  if (wide) {
+    rl_src = mir_graph_->GetSrcWide(mir, 0);
+    rl_dest = GetReturnWide(false);
+  } else {
+    rl_src = mir_graph_->GetSrc(mir, 0);
+    rl_dest = GetReturn(false);
+  }
+  LockLiveArgs(mir);
+  rl_src = ArmMir2Lir::ArgLoc(rl_src);
+  if (rl_src.location == kLocInvalid) {
+    ResetRegPool();
+    return NULL;
+  }
+  // Point of no return - no aborts after this
+  ArmMir2Lir::GenPrintLabel(mir);
+  rl_src = LoadArg(rl_src);
+  if (wide) {
+    StoreValueWide(rl_dest, rl_src);
+  } else {
+    StoreValue(rl_dest, rl_src);
+  }
+  return mir;
+}
+
+/*
+ * Special-case code generation for simple non-throwing leaf methods.
+ */
+void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case)
+{
+   current_dalvik_offset_ = mir->offset;
+   MIR* next_mir = NULL;
+   switch (special_case) {
+     case kNullMethod:
+       DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
+       next_mir = mir;
+       break;
+     case kConstFunction:
+       ArmMir2Lir::GenPrintLabel(mir);
+       LoadConstant(rARM_RET0, mir->dalvikInsn.vB);
+       next_mir = GetNextMir(&bb, mir);
+       break;
+     case kIGet:
+       next_mir = SpecialIGet(&bb, mir, kWord, false, false);
+       break;
+     case kIGetBoolean:
+     case kIGetByte:
+       next_mir = SpecialIGet(&bb, mir, kUnsignedByte, false, false);
+       break;
+     case kIGetObject:
+       next_mir = SpecialIGet(&bb, mir, kWord, false, true);
+       break;
+     case kIGetChar:
+       next_mir = SpecialIGet(&bb, mir, kUnsignedHalf, false, false);
+       break;
+     case kIGetShort:
+       next_mir = SpecialIGet(&bb, mir, kSignedHalf, false, false);
+       break;
+     case kIGetWide:
+       next_mir = SpecialIGet(&bb, mir, kLong, true, false);
+       break;
+     case kIPut:
+       next_mir = SpecialIPut(&bb, mir, kWord, false, false);
+       break;
+     case kIPutBoolean:
+     case kIPutByte:
+       next_mir = SpecialIPut(&bb, mir, kUnsignedByte, false, false);
+       break;
+     case kIPutObject:
+       next_mir = SpecialIPut(&bb, mir, kWord, false, true);
+       break;
+     case kIPutChar:
+       next_mir = SpecialIPut(&bb, mir, kUnsignedHalf, false, false);
+       break;
+     case kIPutShort:
+       next_mir = SpecialIPut(&bb, mir, kSignedHalf, false, false);
+       break;
+     case kIPutWide:
+       next_mir = SpecialIPut(&bb, mir, kLong, true, false);
+       break;
+     case kIdentity:
+       next_mir = SpecialIdentity(mir);
+       break;
+     default:
+       return;
+   }
+   if (next_mir != NULL) {
+    current_dalvik_offset_ = next_mir->offset;
+    if (special_case != kIdentity) {
+      ArmMir2Lir::GenPrintLabel(next_mir);
+    }
+    NewLIR1(kThumbBx, rARM_LR);
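+    // The special case is frameless: record an empty frame and spill set.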
+    core_spill_mask_ = 0;
+    num_core_spills_ = 0;
+    fp_spill_mask_ = 0;
+    num_fp_spills_ = 0;
+    frame_size_ = 0;
+    core_vmap_table_.clear();
+    fp_vmap_table_.clear();
+  }
+}
+
+/*
+ * The sparse table in the literal pool is an array of <key,displacement>
+ * pairs.  For each set, we'll load them as a pair using ldmia.
+ * This means that the register number of the temp we use for the key
+ * must be lower than the reg for the displacement.
+ *
+ * The test loop will look something like:
+ *
+ *   adr   rBase, <table>
+ *   ldr   r_val, [rARM_SP, v_reg_off]
+ *   mov   r_idx, #table_size
+ * lp:
+ *   ldmia rBase!, {r_key, r_disp}
+ *   sub   r_idx, #1
+ *   cmp   r_val, r_key
+ *   ifeq
+ *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
+ *   cbnz  r_idx, lp
+ */
+void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpSparseSwitchTable(table);
+  }
+  // Add the table to the list - we'll process it later
+  SwitchTable *tab_rec =
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int size = table[1];
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
+
+  // Get the switch value
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int rBase = AllocTemp();
+  /* Allocate key and disp temps */
+  int r_key = AllocTemp();
+  int r_disp = AllocTemp();
+  // Make sure r_key's register number is less than r_disp's number for ldmia
+  if (r_key > r_disp) {
+    int tmp = r_disp;
+    r_disp = r_key;
+    r_key = tmp;
+  }
+  // Materialize a pointer to the switch table
+  NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  // Set up r_idx
+  int r_idx = AllocTemp();
+  LoadConstant(r_idx, size);
+  // Establish loop branch target
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  // Load next key/disp
+  NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
+  OpRegReg(kOpCmp, r_key, rl_src.low_reg);
+  // Go if match. NOTE: No instruction set switch here - must stay Thumb2
+  OpIT(kCondEq, "");
+  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
+  tab_rec->anchor = switch_branch;
+  // Needs to use setflags encoding here
+  NewLIR3(kThumb2SubsRRI12, r_idx, r_idx, 1);
+  OpCondBranch(kCondNe, target);
+}
+
+
+void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpPackedSwitchTable(table);
+  }
+  // Add the table to the list - we'll process it later
+  SwitchTable *tab_rec =
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int size = table[1];
+  tab_rec->targets =
+      static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
+
+  // Get the switch value
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int table_base = AllocTemp();
+  // Materialize a pointer to the switch table
+  NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  int low_key = s4FromSwitchData(&table[2]);
+  int keyReg;
+  // Remove the bias, if necessary
+  if (low_key == 0) {
+    keyReg = rl_src.low_reg;
+  } else {
+    keyReg = AllocTemp();
+    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+  }
+  // Bounds check - if < 0 or >= size continue following switch
+  OpRegImm(kOpCmp, keyReg, size-1);
+  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+
+  // Load the displacement from the switch table
+  int disp_reg = AllocTemp();
+  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);
+
+  // ..and go! NOTE: No instruction set switch here - must stay Thumb2
+  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg);
+  tab_rec->anchor = switch_branch;
+
+  /* branch_over target here */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+}
+
+/*
+ * Array data table format:
+ *  ushort ident = 0x0300   magic value
+ *  ushort width            width of each element in the table
+ *  uint   size             number of elements in the table
+ *  ubyte  data[size*width] table of data values (may contain a single-byte
+ *                          padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  // Add the table to the list - we'll process it later
+  FillArrayData *tab_rec =
+      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                 ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  uint16_t width = tab_rec->table[1];
+  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+  tab_rec->size = (size * width) + 8;
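+  // The +8 covers the payload header: ident (2 bytes), width (2 bytes) and size (4 bytes).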
+
+  fill_array_data_.Insert(tab_rec);
+
+  // Making a call - use explicit registers
+  FlushAllRegs();   /* Everything to home location */
+  LoadValueDirectFixed(rl_src, r0);
+  LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+               rARM_LR);
+  // Materialize a pointer to the fill data image
+  NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  MarkSafepointPC(call_inst);
+}
+
+/*
+ * Handle simple case (thin lock) inline.  If it's complicated, bail
+ * out to the heavyweight lock/unlock routines.  We'll use dedicated
+ * registers here in order to be in the right position in case we
+ * need to bail to oat[Lock/Unlock]Object(self, object)
+ *
+ * r0 -> self pointer [arg0 for oat[Lock/Unlock]Object]
+ * r1 -> object [arg1 for oat[Lock/Unlock]Object]
+ * r2 -> initial contents of object->lock, later result of strex
+ * r3 -> self->thread_id
+ * r12 -> allowed to be used by utilities as a general temp
+ *
+ * The result of the strex is 0 if we acquire the lock.
+ *
+ * See comments in monitor.cc for the layout of the lock word.
+ * Of particular interest to this code is the test for the
+ * simple case - which we handle inline.  For monitor enter, the
+ * simple case is thin lock, held by no-one.  For monitor exit,
+ * the simple case is thin lock, held by the unlocking thread with
+ * a recurse count of 0.
+ *
+ * A minor complication is that there is a field in the lock word
+ * unrelated to locking: the hash state.  This field must be ignored, but
+ * preserved.
+ *
+ */
+void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  DCHECK_EQ(LW_SHAPE_THIN, 0);
+  LoadValueDirectFixed(rl_src, r0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
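+  // ldrex/strex encode a word-scaled immediate, hence the ">> 2" on the monitor offset.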
+  NewLIR3(kThumb2Ldrex, r1, r0,
+          mirror::Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
+  // Align owner
+  OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+  // Is lock unheld on lock or held by us (==thread_id) on unlock?
+  NewLIR4(kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
+  NewLIR3(kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+  OpRegImm(kOpCmp, r1, 0);
+  OpIT(kCondEq, "");
+  NewLIR4(kThumb2Strex, r1, r2, r0,
+          mirror::Object::MonitorOffset().Int32Value() >> 2);
+  OpRegImm(kOpCmp, r1, 0);
+  OpIT(kCondNe, "T");
+  // Go expensive route - artLockObjectFromCode(self, obj);
+  LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  MarkSafepointPC(call_inst);
+  GenMemBarrier(kLoadLoad);
+}
+
+/*
+ * For monitor unlock, we don't have to use ldrex/strex.  Once
+ * we've determined that the lock is thin and that we own it with
+ * a zero recursion count, it's safe to punch it back to the
+ * initial, unlocked thin state with a store word.
+ */
+void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
+{
+  DCHECK_EQ(LW_SHAPE_THIN, 0);
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, r0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+  LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+  // Is lock unheld on lock or held by us (==thread_id) on unlock?
+  OpRegRegImm(kOpAnd, r3, r1,
+              (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
+  // Align owner
+  OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+  NewLIR3(kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+  OpRegReg(kOpSub, r1, r2);
+  OpIT(kCondEq, "EE");
+  StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+  // Go expensive route - UnlockObjectFromCode(obj);
+  LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  MarkSafepointPC(call_inst);
+  GenMemBarrier(kStoreLoad);
+}
+
+void ArmMir2Lir::GenMoveException(RegLocation rl_dest)
+{
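+  // Read the pending exception out of the Thread, then clear the slot.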
+  int ex_offset = Thread::ExceptionOffset().Int32Value();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int reset_reg = AllocTemp();
+  LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
+  LoadConstant(reset_reg, 0);
+  StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
+  FreeTemp(reset_reg);
+  StoreValue(rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
+{
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
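+  // Dirty the card by storing the low byte of the card table base at
+  // card_table[tgt_addr >> kCardShift]; the base is set up so that byte
+  // is the dirty value.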
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
+                   kUnsignedByte);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
+}
+
+void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
+{
+  int spill_count = num_core_spills_ + num_fp_spills_;
+  /*
+   * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
+   * mechanism know so it doesn't try to use any of them when
+   * expanding the frame or flushing.  This leaves the utility
+   * code with a single temp: r12.  This should be enough.
+   */
+  LockTemp(r0);
+  LockTemp(r1);
+  LockTemp(r2);
+  LockTemp(r3);
+
+  /*
+   * We can safely skip the stack overflow check if we're
+   * a leaf *and* our frame size < fudge factor.
+   */
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+                            (static_cast<size_t>(frame_size_) <
+                            Thread::kStackOverflowReservedBytes));
+  NewLIR0(kPseudoMethodEntry);
+  if (!skip_overflow_check) {
+    /* Load stack limit */
+    LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+  }
+  /* Spill core callee saves */
+  NewLIR1(kThumb2Push, core_spill_mask_);
+  /* Need to spill any FP regs? */
+  if (num_fp_spills_) {
+    /*
+     * NOTE: fp spills are a little different from core spills in that
+     * they are pushed as a contiguous block.  When promoting from
+     * the fp set, we must allocate all singles from s16..highest-promoted
+     */
+    NewLIR1(kThumb2VPushCS, num_fp_spills_);
+  }
+  if (!skip_overflow_check) {
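+    // Compute the new SP into LR first so we can compare it against the
+    // stack limit (loaded into r12 above) before committing the frame.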
+    OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
+    GenRegRegCheck(kCondCc, rARM_LR, r12, kThrowStackOverflow);
+    OpRegCopy(rARM_SP, rARM_LR);     // Establish stack
+  } else {
+    OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
+  }
+
+  FlushIns(ArgLocs, rl_method);
+
+  FreeTemp(r0);
+  FreeTemp(r1);
+  FreeTemp(r2);
+  FreeTemp(r3);
+}
+
+void ArmMir2Lir::GenExitSequence()
+{
+  int spill_count = num_core_spills_ + num_fp_spills_;
+  /*
+   * In the exit path, r0/r1 are live - make sure they aren't
+   * allocated by the register utilities as temps.
+   */
+  LockTemp(r0);
+  LockTemp(r1);
+
+  NewLIR0(kPseudoMethodExit);
+  OpRegImm(kOpAdd, rARM_SP, frame_size_ - (spill_count * 4));
+  /* Need to restore any FP callee saves? */
+  if (num_fp_spills_) {
+    NewLIR1(kThumb2VPopCS, num_fp_spills_);
+  }
+  if (core_spill_mask_ & (1 << rARM_LR)) {
+    /* Unspill rARM_LR to rARM_PC */
+    core_spill_mask_ &= ~(1 << rARM_LR);
+    core_spill_mask_ |= (1 << rARM_PC);
+  }
+  NewLIR1(kThumb2Pop, core_spill_mask_);
+  if (!(core_spill_mask_ & (1 << rARM_PC))) {
+    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
+    NewLIR1(kThumbBx, rARM_LR);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
new file mode 100644
index 0000000..a9199df
--- /dev/null
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_ARM_CODEGENARM_H_
+#define ART_SRC_COMPILER_DEX_QUICK_ARM_CODEGENARM_H_
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+class ArmMir2Lir : public Mir2Lir {
+  public:
+    ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+    // Required for target - codegen helpers.
+    bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    int LoadHelper(int offset);
+    LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                          int s_reg);
+    LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                             int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    LIR* LoadConstantNoClobber(int r_dest, int value);
+    LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                              int r_src, int r_src_hi, OpSize size, int s_reg);
+    void MarkGCCard(int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    bool IsFpReg(int reg);
+    bool SameRegType(int reg1, int reg2);
+    int AllocTypedTemp(bool fp_hint, int reg_class);
+    int AllocTypedTempPair(bool fp_hint, int reg_class);
+    int S2d(int low_reg, int high_reg);
+    int TargetReg(SpecialTargetRegister reg);
+    RegisterInfo* GetRegInfo(int reg);
+    RegLocation GetReturnAlt();
+    RegLocation GetReturnWideAlt();
+    RegLocation LocCReturn();
+    RegLocation LocCReturnDouble();
+    RegLocation LocCReturnFloat();
+    RegLocation LocCReturnWide();
+    uint32_t FpRegMask();
+    uint64_t GetRegMaskCommon(int reg);
+    void AdjustSpillMask();
+    void ClobberCalleeSave();
+    void FlushReg(int reg);
+    void FlushRegWide(int reg1, int reg2);
+    void FreeCallTemps();
+    void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    void LockCallTemps();
+    void MarkPreservedSingle(int v_reg, int reg);
+    void CompilerInitializeRegAlloc();
+
+    // Required for target - miscellaneous.
+    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    void SetupTargetResourceMasks(LIR* lir);
+    const char* GetTargetInstFmt(int opcode);
+    const char* GetTargetInstName(int opcode);
+    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    uint64_t GetPCUseDefEncoding();
+    uint64_t GetTargetInstFlags(int opcode);
+    int GetInsnSize(LIR* lir);
+    bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2);
+    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
+                        RegLocation rl_src, int scale);
+    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                     RegLocation rl_index, RegLocation rl_dest, int scale);
+    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                     RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_shift);
+    void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                         RegLocation rl_src1, RegLocation rl_src2);
+    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                  RegLocation rl_src2);
+    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    bool GenInlinedSqrt(CallInfo* info);
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenDivZeroCheck(int reg_lo, int reg_hi);
+    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    void GenExitSequence();
+    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    void GenSelect(BasicBlock* bb, MIR* mir);
+    void GenMemBarrier(MemBarrierKind barrier_kind);
+    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    void GenMoveException(RegLocation rl_dest);
+    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit);
+    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+
+    // Required for target - single operation generators.
+    LIR* OpUnconditionalBranch(LIR* target);
+    LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    LIR* OpFpRegCopy(int r_dest, int r_src);
+    LIR* OpIT(ConditionCode cond, const char* guide);
+    LIR* OpMem(OpKind op, int rBase, int disp);
+    LIR* OpPcRelLoad(int reg, LIR* target);
+    LIR* OpReg(OpKind op, int r_dest_src);
+    LIR* OpRegCopy(int r_dest, int r_src);
+    LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    LIR* OpTestSuspend(LIR* target);
+    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpVldm(int rBase, int count);
+    LIR* OpVstm(int rBase, int count);
+    void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    void OpTlsCmp(int offset, int val);
+
+    RegLocation ArgLoc(RegLocation loc);
+    LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
+                          int s_reg);
+    LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+    void GenPrintLabel(MIR* mir);
+    LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift);
+    LIR* OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, int shift);
+    static const ArmEncodingMap EncodingMap[kArmLast];
+    int EncodeShift(int code, int amount);
+    int ModifiedImmediate(uint32_t value);
+    ArmConditionCode ArmConditionEncoding(ConditionCode code);
+    bool InexpensiveConstantInt(int32_t value);
+    bool InexpensiveConstantFloat(int32_t value);
+    bool InexpensiveConstantLong(int64_t value);
+    bool InexpensiveConstantDouble(int64_t value);
+
+  private:
+    void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
+                                  ConditionCode ccode);
+    int InPosition(int s_reg);
+    RegLocation LoadArg(RegLocation loc);
+    void LockLiveArgs(MIR* mir);
+    MIR* GetNextMir(BasicBlock** p_bb, MIR* mir);
+    MIR* SpecialIGet(BasicBlock** bb, MIR* mir, OpSize size, bool long_or_double, bool is_object);
+    MIR* SpecialIPut(BasicBlock** bb, MIR* mir, OpSize size, bool long_or_double, bool is_object);
+    MIR* SpecialIdentity(MIR* mir);
+    LIR* LoadFPConstantValue(int r_dest, int value);
+    bool BadOverlap(RegLocation rl_src, RegLocation rl_dest);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_ARM_CODEGENARM_H_
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
new file mode 100644
index 0000000..53a5e1a
--- /dev/null
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "dex/quick/mir_to_lir-inl.h"
+
+namespace art {
+
+void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2)
+{
+  int op = kThumbBkpt;
+  RegLocation rl_result;
+
+  /*
+   * Don't attempt to optimize register usage since these opcodes call out to
+   * the handlers.
+   */
+  switch (opcode) {
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::ADD_FLOAT:
+      op = kThumb2Vadds;
+      break;
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT:
+      op = kThumb2Vsubs;
+      break;
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT:
+      op = kThumb2Vdivs;
+      break;
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT:
+      op = kThumb2Vmuls;
+      break;
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_FLOAT:
+      GenNegFloat(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+  StoreValue(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  int op = kThumbBkpt;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::ADD_DOUBLE:
+      op = kThumb2Vaddd;
+      break;
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE:
+      op = kThumb2Vsubd;
+      break;
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE:
+      op = kThumb2Vdivd;
+      break;
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE:
+      op = kThumb2Vmuld;
+      break;
+    case Instruction::REM_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_DOUBLE:
+      GenNegDouble(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
+  DCHECK(rl_src1.wide);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
+  DCHECK(rl_src2.wide);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_result.wide);
+  NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+          S2d(rl_src2.low_reg, rl_src2.high_reg));
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenConversion(Instruction::Code opcode,
+                               RegLocation rl_dest, RegLocation rl_src)
+{
+  int op = kThumbBkpt;
+  int src_reg;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::INT_TO_FLOAT:
+      op = kThumb2VcvtIF;
+      break;
+    case Instruction::FLOAT_TO_INT:
+      op = kThumb2VcvtFI;
+      break;
+    case Instruction::DOUBLE_TO_FLOAT:
+      op = kThumb2VcvtDF;
+      break;
+    case Instruction::FLOAT_TO_DOUBLE:
+      op = kThumb2VcvtFd;
+      break;
+    case Instruction::INT_TO_DOUBLE:
+      op = kThumb2VcvtID;
+      break;
+    case Instruction::DOUBLE_TO_INT:
+      op = kThumb2VcvtDI;
+      break;
+    case Instruction::LONG_TO_DOUBLE:
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      return;
+    case Instruction::FLOAT_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      return;
+    case Instruction::LONG_TO_FLOAT:
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      return;
+    case Instruction::DOUBLE_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  if (rl_src.wide) {
+    rl_src = LoadValueWide(rl_src, kFPReg);
+    src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+  } else {
+    rl_src = LoadValue(rl_src, kFPReg);
+    src_reg = rl_src.low_reg;
+  }
+  if (rl_dest.wide) {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double)
+{
+  LIR* target = &block_label_list_[bb->taken->id];
+  RegLocation rl_src1;
+  RegLocation rl_src2;
+  if (is_double) {
+    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src1.high_reg),
+            S2d(rl_src2.low_reg, rl_src2.high_reg));
+  } else {
+    rl_src1 = mir_graph_->GetSrc(mir, 0);
+    rl_src2 = mir_graph_->GetSrc(mir, 1);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+  }
+  NewLIR0(kThumb2Fmstat);
+  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+  switch (ccode) {
+    case kCondEq:
+    case kCondNe:
+      break;
+    case kCondLt:
+      if (gt_bias) {
+        ccode = kCondMi;
+      }
+      break;
+    case kCondLe:
+      if (gt_bias) {
+        ccode = kCondLs;
+      }
+      break;
+    case kCondGt:
+      if (gt_bias) {
+        ccode = kCondHi;
+      }
+      break;
+    case kCondGe:
+      if (gt_bias) {
+        ccode = kCondCs;
+      }
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpCondBranch(ccode, target);
+}
+
+
+void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2)
+{
+  bool is_double = false;
+  int default_result = -1;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::CMPL_FLOAT:
+      is_double = false;
+      default_result = -1;
+      break;
+    case Instruction::CMPG_FLOAT:
+      is_double = false;
+      default_result = 1;
+      break;
+    case Instruction::CMPL_DOUBLE:
+      is_double = true;
+      default_result = -1;
+      break;
+    case Instruction::CMPG_DOUBLE:
+      is_double = true;
+      default_result = 1;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  if (is_double) {
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
+    ClobberSReg(rl_dest.s_reg_low);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    LoadConstant(rl_result.low_reg, default_result);
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src1.high_reg),
+            S2d(rl_src2.low_reg, rl_src2.high_reg));
+  } else {
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
+    ClobberSReg(rl_dest.s_reg_low);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    LoadConstant(rl_result.low_reg, default_result);
+    NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+  }
+  DCHECK(!ARM_FPREG(rl_result.low_reg));
+  NewLIR0(kThumb2Fmstat);
+
+  OpIT((default_result == -1) ? kCondGt : kCondMi, "");
+  NewLIR2(kThumb2MovImmShift, rl_result.low_reg,
+          ModifiedImmediate(-default_result)); // Must not alter ccodes
+  GenBarrier();
+
+  OpIT(kCondEq, "");
+  LoadConstant(rl_result.low_reg, 0);
+  GenBarrier();
+
+  StoreValue(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result;
+  rl_src = LoadValue(rl_src, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+  StoreValue(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result;
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
+          S2d(rl_src.low_reg, rl_src.high_reg));
+  StoreValueWide(rl_dest, rl_result);
+}
+
+bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
+  LIR* branch;
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);  // double place for result
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
+          S2d(rl_src.low_reg, rl_src.high_reg));
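+  // vsqrt yields a NaN for negative inputs; comparing the result with
+  // itself (NaN != NaN) lets us fall back to the runtime sqrt helper below.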
+  NewLIR2(kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
+          S2d(rl_result.low_reg, rl_result.high_reg));
+  NewLIR0(kThumb2Fmstat);
+  branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
+  ClobberCalleeSave();
+  LockCallTemps();  // Using fixed registers
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt));
+  NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+  NewLIR1(kThumbBlxR, r_tgt);
+  NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
+
+}  // namespace art
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
new file mode 100644
index 0000000..feea896
--- /dev/null
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Thumb2 ISA. */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mirror/array.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1,
+         int src2, LIR* target)
+{
+  OpRegReg(kOpCmp, src1, src2);
+  return OpCondBranch(cond, target);
+}
+
+/*
+ * Generate a Thumb2 IT instruction, which can nullify up to
+ * four subsequent instructions based on a condition and its
+ * inverse.  The condition applies to the first instruction, which
+ * is executed if the condition is met.  The string "guide" consists
+ * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
+ * A "T" means the instruction is executed if the condition is
+ * met, and an "E" means the instruction is executed if the condition
+ * is not met.
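+ *
+ * For example, OpIT(kCondEq, "E") emits an "ITE EQ": the next instruction
+ * executes only if EQ holds, and the one after it only if EQ does not hold.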
+ */
+LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide)
+{
+  int mask;
+  int mask3 = 0;
+  int mask2 = 0;
+  int mask1 = 0;
+  ArmConditionCode code = ArmConditionEncoding(ccode);
+  int cond_bit = code & 1;
+  int alt_bit = cond_bit ^ 1;
+
+  // Note: case fall-throughs are intentional.
+  switch (strlen(guide)) {
+    case 3:
+      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
+    case 2:
+      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
+    case 1:
+      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
+      break;
+    case 0:
+      break;
+    default:
+      LOG(FATAL) << "OAT: bad case in OpIT";
+  }
+  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
+       (1 << (3 - strlen(guide)));
+  return NewLIR2(kThumb2It, code, mask);
+}
+
+/*
+ * 64-bit 3way compare function.
+ *     mov   rX, #-1
+ *     cmp   op1hi, op2hi
+ *     blt   done
+ *     bgt   flip
+ *     sub   rX, op1lo, op2lo (treat as unsigned)
+ *     beq   done
+ *     ite   hi
+ *     mov(hi)   rX, #-1
+ *     mov(!hi)  rX, #1
+ * flip:
+ *     neg   rX
+ * done:
+ */
+void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LIR* target1;
+  LIR* target2;
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  int t_reg = AllocTemp();
+  LoadConstant(t_reg, -1);
+  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+  LIR* branch1 = OpCondBranch(kCondLt, NULL);
+  LIR* branch2 = OpCondBranch(kCondGt, NULL);
+  OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+  LIR* branch3 = OpCondBranch(kCondEq, NULL);
+
+  OpIT(kCondHi, "E");
+  NewLIR2(kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
+  LoadConstant(t_reg, 1);
+  GenBarrier();
+
+  target2 = NewLIR0(kPseudoTargetLabel);
+  OpRegReg(kOpNeg, t_reg, t_reg);
+
+  target1 = NewLIR0(kPseudoTargetLabel);
+
+  RegLocation rl_temp = LocCReturn(); // Just using as template, will change
+  rl_temp.low_reg = t_reg;
+  StoreValue(rl_dest, rl_temp);
+  FreeTemp(t_reg);
+
+  branch1->target = target1;
+  branch2->target = target2;
+  branch3->target = branch1->target;
+}
+
+void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
+                                          int64_t val, ConditionCode ccode)
+{
+  int32_t val_lo = Low32Bits(val);
+  int32_t val_hi = High32Bits(val);
+  DCHECK_GE(ModifiedImmediate(val_lo), 0);
+  DCHECK_GE(ModifiedImmediate(val_hi), 0);
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  int32_t low_reg = rl_src1.low_reg;
+  int32_t high_reg = rl_src1.high_reg;
+
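+  // The high words are compared with signed conditions; once they are equal
+  // the low words must be compared as unsigned, hence the remapped condition
+  // codes (CC/LS/HI/CS) used for the final compare-and-branch below.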
+  switch (ccode) {
+    case kCondEq:
+    case kCondNe:
+      LIR* target;
+      ConditionCode condition;
+      if (ccode == kCondEq) {
+        target = not_taken;
+        condition = kCondEq;
+      } else {
+        target = taken;
+        condition = kCondNe;
+      }
+      if (val == 0) {
+        int t_reg = AllocTemp();
+        NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
+        FreeTemp(t_reg);
+        OpCondBranch(condition, taken);
+        return;
+      }
+      OpCmpImmBranch(kCondNe, high_reg, val_hi, target);
+      break;
+    case kCondLt:
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
+      ccode = kCondCc;
+      break;
+    case kCondLe:
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
+      ccode = kCondLs;
+      break;
+    case kCondGt:
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
+      ccode = kCondHi;
+      break;
+    case kCondGe:
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
+      ccode = kCondCs;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
+}
+
+void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
+{
+  RegLocation rl_result;
+  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
+  // Temporary debugging code
+  int dest_sreg = mir->ssa_rep->defs[0];
+  if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
+    LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
+              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+    LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
+    LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
+    LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
+    if (mir->ssa_rep->num_uses == 1) {
+      LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
+    } else {
+      LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
+                << mir->ssa_rep->uses[2];
+    }
+    CHECK(false) << "Invalid target sreg on Select.";
+  }
+  // End temporary debugging code
+  RegLocation rl_dest = mir_graph_->GetDest(mir);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  if (mir->ssa_rep->num_uses == 1) {
+    // CONST case
+    int true_val = mir->dalvikInsn.vB;
+    int false_val = mir->dalvikInsn.vC;
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    if ((true_val == 1) && (false_val == 0)) {
+      OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
+      OpIT(kCondCc, "");
+      LoadConstant(rl_result.low_reg, 0);
+      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+    } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
+      OpRegImm(kOpCmp, rl_src.low_reg, 0);
+      OpIT(kCondEq, "E");
+      LoadConstant(rl_result.low_reg, true_val);
+      LoadConstant(rl_result.low_reg, false_val);
+      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+    } else {
+      // Unlikely case - could be tuned.
+      int t_reg1 = AllocTemp();
+      int t_reg2 = AllocTemp();
+      LoadConstant(t_reg1, true_val);
+      LoadConstant(t_reg2, false_val);
+      OpRegImm(kOpCmp, rl_src.low_reg, 0);
+      OpIT(kCondEq, "E");
+      OpRegCopy(rl_result.low_reg, t_reg1);
+      OpRegCopy(rl_result.low_reg, t_reg2);
+      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+    }
+  } else {
+    // MOVE case
+    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
+    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
+    rl_true = LoadValue(rl_true, kCoreReg);
+    rl_false = LoadValue(rl_false, kCoreReg);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    OpRegImm(kOpCmp, rl_src.low_reg, 0);
+    OpIT(kCondEq, "E");
+    LIR* l1 = OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+    l1->flags.is_nop = false;  // Make sure this instruction isn't optimized away
+    LIR* l2 = OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+    l2->flags.is_nop = false;  // Make sure this instruction isn't optimized away
+    GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+  }
+  StoreValue(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir)
+{
+  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+  // Normalize such that if either operand is constant, src2 will be constant.
+  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+  if (rl_src1.is_const) {
+    RegLocation rl_temp = rl_src1;
+    rl_src1 = rl_src2;
+    rl_src2 = rl_temp;
+    ccode = FlipComparisonOrder(ccode);
+  }
+  if (rl_src2.is_const) {
+    RegLocation rl_temp = UpdateLocWide(rl_src2);
+    // Do special compare/branch against simple const operand if not already in registers.
+    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+    if ((rl_temp.location != kLocPhysReg) &&
+        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
+      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
+      return;
+    }
+  }
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+  switch (ccode) {
+    case kCondEq:
+      OpCondBranch(kCondNe, not_taken);
+      break;
+    case kCondNe:
+      OpCondBranch(kCondNe, taken);
+      break;
+    case kCondLt:
+      OpCondBranch(kCondLt, taken);
+      OpCondBranch(kCondGt, not_taken);
+      ccode = kCondCc;
+      break;
+    case kCondLe:
+      OpCondBranch(kCondLt, taken);
+      OpCondBranch(kCondGt, not_taken);
+      ccode = kCondLs;
+      break;
+    case kCondGt:
+      OpCondBranch(kCondGt, taken);
+      OpCondBranch(kCondLt, not_taken);
+      ccode = kCondHi;
+      break;
+    case kCondGe:
+      OpCondBranch(kCondGt, taken);
+      OpCondBranch(kCondLt, not_taken);
+      ccode = kCondCs;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+  OpCondBranch(ccode, taken);
+}
+
+/*
+ * Generate a register comparison to an immediate and branch.  Caller
+ * is responsible for setting branch target field.
+ */
+LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
+                                LIR* target)
+{
+  LIR* branch;
+  int mod_imm;
+  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
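+  // cbz/cbnz can only test a low register against zero; other cases fall
+  // back to a cmp against the immediate (or a temp) plus a conditional branch.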
+  if ((ARM_LOWREG(reg)) && (check_value == 0) &&
+     ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
+    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+                     reg, 0);
+  } else {
+    mod_imm = ModifiedImmediate(check_value);
+    if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
+      NewLIR2(kThumbCmpRI8, reg, check_value);
+    } else if (mod_imm >= 0) {
+      NewLIR2(kThumb2CmpRI12, reg, mod_imm);
+    } else {
+      int t_reg = AllocTemp();
+      LoadConstant(t_reg, check_value);
+      OpRegReg(kOpCmp, reg, t_reg);
+    }
+    branch = NewLIR2(kThumbBCond, 0, arm_cond);
+  }
+  branch->target = target;
+  return branch;
+}
+
+LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
+{
+  LIR* res;
+  int opcode;
+  if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
+    return OpFpRegCopy(r_dest, r_src);
+  if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
+    opcode = kThumbMovRR;
+  else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
+     opcode = kThumbMovRR_H2H;
+  else if (ARM_LOWREG(r_dest))
+     opcode = kThumbMovRR_H2L;
+  else
+     opcode = kThumbMovRR_L2H;
+  res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src)
+{
+  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
+  return res;
+}
+
+void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
+                               int src_hi)
+{
+  bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
+  bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
+  DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
+  DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
+  if (dest_fp) {
+    if (src_fp) {
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+    } else {
+      NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
+    }
+  } else {
+    if (src_fp) {
+      NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
+    } else {
+      // Handle overlap
+      if (src_hi == dest_lo) {
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+      } else {
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+      }
+    }
+  }
+}
+
+// Table of magic divisors
+struct MagicTable {
+  uint32_t magic;
+  uint32_t shift;
+  DividePattern pattern;
+};
+
+static const MagicTable magic_table[] = {
+  {0, 0, DivideNone},        // 0
+  {0, 0, DivideNone},        // 1
+  {0, 0, DivideNone},        // 2
+  {0x55555556, 0, Divide3},  // 3
+  {0, 0, DivideNone},        // 4
+  {0x66666667, 1, Divide5},  // 5
+  {0x2AAAAAAB, 0, Divide3},  // 6
+  {0x92492493, 2, Divide7},  // 7
+  {0, 0, DivideNone},        // 8
+  {0x38E38E39, 1, Divide5},  // 9
+  {0x66666667, 2, Divide5},  // 10
+  {0x2E8BA2E9, 1, Divide5},  // 11
+  {0x2AAAAAAB, 1, Divide5},  // 12
+  {0x4EC4EC4F, 2, Divide5},  // 13
+  {0x92492493, 3, Divide7},  // 14
+  {0x88888889, 3, Divide7},  // 15
+};
+
+// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
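+// For example, for x/3 (Divide3) the code below computes
+//   smull lo, hi, x, 0x55555556;  result = hi - (x >> 31)
+// i.e. the high half of the 64-bit product, corrected for negative x.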
+bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
+    return false;
+  }
+  DividePattern pattern = magic_table[lit].pattern;
+  if (pattern == DivideNone) {
+    return false;
+  }
+  // Tuning: add rem patterns
+  if (dalvik_opcode != Instruction::DIV_INT_LIT8) {
+    return false;
+  }
+
+  int r_magic = AllocTemp();
+  LoadConstant(r_magic, magic_table[lit].magic);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int r_hi = AllocTemp();
+  int r_lo = AllocTemp();
+  NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
+  switch (pattern) {
+    case Divide3:
+      OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
+               rl_src.low_reg, EncodeShift(kArmAsr, 31));
+      break;
+    case Divide5:
+      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
+      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
+               EncodeShift(kArmAsr, magic_table[lit].shift));
+      break;
+    case Divide7:
+      OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
+      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
+      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
+               EncodeShift(kArmAsr, magic_table[lit].shift));
+      break;
+    default:
+      LOG(FATAL) << "Unexpected pattern: " << pattern;
+  }
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
+                    int reg1, int base, int offset, ThrowKind kind)
+{
+  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
+  return NULL;
+}
+
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
+                                     bool is_div)
+{
+  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
+  return rl_dest;
+}
+
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
+                                  bool is_div)
+{
+  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
+  return rl_dest;
+}
+
+bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
+{
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
+  RegLocation rl_src1 = info->args[0];
+  RegLocation rl_src2 = info->args[1];
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+  OpIT((is_min) ? kCondGt : kCondLt, "E");
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+  GenBarrier();
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
+{
+  LOG(FATAL) << "Unexpected use of OpLea for Arm";
+}
+
+void ArmMir2Lir::OpTlsCmp(int offset, int val)
+{
+  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
+}
+
+bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
+  // Unused - RegLocation rl_src_unsafe = info->args[0];
+  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
+  RegLocation rl_src_offset = info->args[2];  // long low
+  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
+  RegLocation rl_src_expected = info->args[4];  // int or Object
+  RegLocation rl_src_new_value = info->args[5];  // int or Object
+  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
+
+
+  // Release store semantics, get the barrier out of the way.  TODO: revisit
+  GenMemBarrier(kStoreLoad);
+
+  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
+  RegLocation rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
+
+  if (need_write_barrier && !mir_graph_->IsConstantNullRef(rl_new_value)) {
+    // Mark card for object assuming new value is stored.
+    MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
+  }
+
+  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+
+  int r_ptr = AllocTemp();
+  OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
+
+  // Free now unneeded rl_object and rl_offset to give more temps.
+  ClobberSReg(rl_object.s_reg_low);
+  FreeTemp(rl_object.low_reg);
+  ClobberSReg(rl_offset.s_reg_low);
+  FreeTemp(rl_offset.low_reg);
+
+  int r_old_value = AllocTemp();
+  NewLIR3(kThumb2Ldrex, r_old_value, r_ptr, 0);  // r_old_value := [r_ptr]
+
+  RegLocation rl_expected = LoadValue(rl_src_expected, kCoreReg);
+
+  // if (r_old_value == rExpected) {
+  //   [r_ptr] <- r_new_value && r_result := success ? 0 : 1
+  //   r_result ^= 1
+  // } else {
+  //   r_result := 0
+  // }
+  OpRegReg(kOpCmp, r_old_value, rl_expected.low_reg);
+  FreeTemp(r_old_value);  // Now unneeded.
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpIT(kCondEq, "TE");
+  NewLIR4(kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
+  FreeTemp(r_ptr);  // Now unneeded.
+  OpRegImm(kOpXor, rl_result.low_reg, 1);
+  OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
+
+  StoreValue(rl_dest, rl_result);
+
+  return true;
+}
+
+LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target)
+{
+  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+}
+
+LIR* ArmMir2Lir::OpVldm(int rBase, int count)
+{
+  return NewLIR3(kThumb2Vldms, rBase, fr0, count);
+}
+
+LIR* ArmMir2Lir::OpVstm(int rBase, int count)
+{
+  return NewLIR3(kThumb2Vstms, rBase, fr0, count);
+}
+
+void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+                                               RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit)
+{
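+  // lit has exactly two bits set (first_bit and second_bit), so
+  // src * lit == (src + (src << (second_bit - first_bit))) << first_bit.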
+  OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+                   EncodeShift(kArmLsl, second_bit - first_bit));
+  if (first_bit != 0) {
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+  }
+}
+
+void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
+{
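+  // OR the two halves together with flags set; a zero result means the
+  // 64-bit divisor is zero.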
+  int t_reg = AllocTemp();
+  NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+  FreeTemp(t_reg);
+  GenCheck(kCondEq, kThrowDivZero);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* ArmMir2Lir::OpTestSuspend(LIR* target)
+{
+  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
+  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+}
+
+// Decrement register and branch on condition
+LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
+{
+  // Combine sub & test using sub setflags encoding here
+  NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
+  return OpCondBranch(c_code, target);
+}
+
+void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+  int dmb_flavor;
+  // TODO: revisit Arm barrier kinds
+  switch (barrier_kind) {
+    case kLoadStore: dmb_flavor = kSY; break;
+    case kLoadLoad: dmb_flavor = kSY; break;
+    case kStoreStore: dmb_flavor = kST; break;
+    case kStoreLoad: dmb_flavor = kSY; break;
+    default:
+      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
+      dmb_flavor = kSY;  // quiet gcc.
+      break;
+  }
+  LIR* dmb = NewLIR1(kThumb2Dmb, dmb_flavor);
+  dmb->def_mask = ENCODE_ALL;
+#endif
+}
+
+void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
+{
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int z_reg = AllocTemp();
+  LoadConstantNoClobber(z_reg, 0);
+  // Check for destructive overlap
+  if (rl_result.low_reg == rl_src.high_reg) {
+    int t_reg = AllocTemp();
+    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
+    FreeTemp(t_reg);
+  } else {
+    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
+  }
+  FreeTemp(z_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+
+ /*
+  * Check to see if a result pair has a misaligned overlap with an operand pair.  This
+  * is not usual for dx to generate, but it is legal (for now).  In a future rev of
+  * dex, we'll want to make this case illegal.
+  */
+bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest)
+{
+  DCHECK(rl_src.wide);
+  DCHECK(rl_dest.wide);
+  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
+}
+
+void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+    /*
+     * To pull off inline multiply, we have a worst-case requirement of 8 temporary
+     * registers.  Normally for Arm, we get 5.  We can get to 6 by including
+     * lr in the temp set.  The only problematic case is all operands and result are
+     * distinct, and none have been promoted.  In that case, we can succeed by aggressively
+     * freeing operand temp registers after they are no longer needed.  All other cases
+     * can proceed normally.  We'll just punt on the case of the result having a misaligned
+     * overlap with either operand and send that case to a runtime handler.
+     */
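+    /*
+     * The 64x64 -> 64 multiply below is decomposed as:
+     *   result.lo = lo32(src1.lo * src2.lo)
+     *   result.hi = hi32(src1.lo * src2.lo) + src1.hi * src2.lo + src1.lo * src2.hi
+     */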
+    RegLocation rl_result;
+    if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
+      int func_offset = ENTRYPOINT_OFFSET(pLmul);
+      FlushAllRegs();
+      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(false);
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    }
+    // Temporarily add LR to the temp pool, and assign it to tmp1
+    MarkTemp(rARM_LR);
+    FreeTemp(rARM_LR);
+    int tmp1 = rARM_LR;
+    LockTemp(rARM_LR);
+
+    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+    bool special_case = true;
+    // If operands are the same, or any pair has been promoted we're not the special case.
+    if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
+        (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
+        (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
+      special_case = false;
+    }
+    // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
+    int res_lo = AllocTemp();
+    int res_hi;
+    if (rl_src1.low_reg == rl_src2.low_reg) {
+      res_hi = AllocTemp();
+      NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
+      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
+      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+    } else {
+      // In the special case, all temps are now allocated
+      NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
+      if (special_case) {
+        DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
+        DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
+        FreeTemp(rl_src1.high_reg);
+      }
+      res_hi = AllocTemp();
+
+      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
+      NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
+      NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
+      if (special_case) {
+        FreeTemp(rl_src1.low_reg);
+        Clobber(rl_src1.low_reg);
+        Clobber(rl_src1.high_reg);
+      }
+    }
+    FreeTemp(tmp1);
+    rl_result = GetReturnWide(false); // Just using as a template.
+    rl_result.low_reg = res_lo;
+    rl_result.high_reg = res_hi;
+    StoreValueWide(rl_dest, rl_result);
+    // Now, restore lr to its non-temp status.
+    Clobber(rARM_LR);
+    UnmarkTemp(rARM_LR);
+}
+
+void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
+}
+
+void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
+}
+
+void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
+}
+
+void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
+}
+
+void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of genXoLong for Arm";
+}
+
+/*
+ * Generate array load
+ */
+void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+  RegLocation rl_result;
+  bool constant_index = rl_index.is_const;
+  rl_array = LoadValue(rl_array, kCoreReg);
+  if (!constant_index) {
+    rl_index = LoadValue(rl_index, kCoreReg);
+  }
+
+  if (rl_dest.wide) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  // If index is constant, just fold it into the data offset
+  if (constant_index) {
+    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
+  }
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  int reg_len = INVALID_REG;
+  if (needs_range_check) {
+    reg_len = AllocTemp();
+    /* Get len */
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+  }
+  if (rl_dest.wide || rl_dest.fp || constant_index) {
+    int reg_ptr;
+    if (constant_index) {
+      reg_ptr = rl_array.low_reg;  // NOTE: must not alter reg_ptr in constant case.
+    } else {
+      // No special indexed operation, lea + load w/ displacement
+      reg_ptr = AllocTemp();
+      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+                       EncodeShift(kArmLsl, scale));
+      FreeTemp(rl_index.low_reg);
+    }
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    if (needs_range_check) {
+      if (constant_index) {
+        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
+      } else {
+        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+      }
+      FreeTemp(reg_len);
+    }
+    if (rl_dest.wide) {
+      LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+      if (!constant_index) {
+        FreeTemp(reg_ptr);
+      }
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
+      if (!constant_index) {
+        FreeTemp(reg_ptr);
+      }
+      StoreValue(rl_dest, rl_result);
+    }
+  } else {
+    // Offset base, then use indexed load
+    int reg_ptr = AllocTemp();
+    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+    FreeTemp(rl_array.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    if (needs_range_check) {
+      // TODO: change kCondCS to a more meaningful name, is the sense of
+      // carry-set/clear flipped?
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
+    }
+    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+    FreeTemp(reg_ptr);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * Generate array store
+ */
+void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+  bool constant_index = rl_index.is_const;
+
+  if (rl_src.wide) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  // If index is constant, just fold it into the data offset.
+  if (constant_index) {
+    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
+  }
+
+  rl_array = LoadValue(rl_array, kCoreReg);
+  if (!constant_index) {
+    rl_index = LoadValue(rl_index, kCoreReg);
+  }
+
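+  // Choose the pointer register: with a constant index the array register is used
+  // directly (the offset was folded into data_offset); otherwise reuse it if it is
+  // a temp, else allocate a fresh temp.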
+  int reg_ptr;
+  if (constant_index) {
+    reg_ptr = rl_array.low_reg;
+  } else if (IsTemp(rl_array.low_reg)) {
+    Clobber(rl_array.low_reg);
+    reg_ptr = rl_array.low_reg;
+  } else {
+    reg_ptr = AllocTemp();
+  }
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  int reg_len = INVALID_REG;
+  if (needs_range_check) {
+    reg_len = AllocTemp();
+    // NOTE: max live temps (4) here.
+    /* Get len */
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+  }
+  /* at this point, reg_ptr points to array, 2 live temps */
+  if (rl_src.wide || rl_src.fp || constant_index) {
+    if (rl_src.wide) {
+      rl_src = LoadValueWide(rl_src, reg_class);
+    } else {
+      rl_src = LoadValue(rl_src, reg_class);
+    }
+    if (!constant_index) {
+      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+                       EncodeShift(kArmLsl, scale));
+    }
+    if (needs_range_check) {
+      if (constant_index) {
+        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
+      } else {
+        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+      }
+      FreeTemp(reg_len);
+    }
+
+    if (rl_src.wide) {
+      StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
+    } else {
+      StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
+    }
+  } else {
+    /* reg_ptr -> array data */
+    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+    rl_src = LoadValue(rl_src, reg_class);
+    if (needs_range_check) {
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
+    }
+    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+                     scale, size);
+  }
+  if (!constant_index) {
+    FreeTemp(reg_ptr);
+  }
+}
+
+/*
+ * Generate array store of an object reference, with array element type check.
+ */
+void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
+
+  int r_value = TargetReg(kArg0);  // Register holding value
+  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
+  int r_array = TargetReg(kArg2);  // Register holding array
+  int r_index = TargetReg(kArg3);  // Register holding index into array
+
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
+
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+
+  // Store of null?
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
+
+  // Get the array's class.
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+                          r_array_class, true);
+  // Redo LoadValues in case they didn't survive the call.
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
+  r_array_class = INVALID_REG;
+
+  // Branch here if value to be stored == null
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  null_value_check->target = target;
+
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  int reg_len = INVALID_REG;
+  if (needs_range_check) {
+    reg_len = TargetReg(kArg1);
+    LoadWordDisp(r_array, len_offset, reg_len);  // Get len
+  }
+  /* r_ptr -> array data */
+  int r_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, r_ptr, r_array, data_offset);
+  if (needs_range_check) {
+    GenRegRegCheck(kCondCs, r_index, reg_len, kThrowArrayBounds);
+  }
+  StoreBaseIndexed(r_ptr, r_index, r_value, scale, kWord);
+  FreeTemp(r_ptr);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
+  }
+}
+
+void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift)
+{
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  // Per spec, we only care about low 6 bits of shift amount.
+  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+  if (shift_amount == 0) {
+    StoreValueWide(rl_dest, rl_src);
+    return;
+  }
+  if (BadOverlap(rl_src, rl_dest)) {
+    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
+    return;
+  }
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  switch (opcode) {
+    case Instruction::SHL_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+      if (shift_amount == 1) {
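+        // Shift left by one == x + x; the adc carries into the high word.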
+        OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
+        OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
+      } else if (shift_amount == 32) {
+        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+        LoadConstant(rl_result.low_reg, 0);
+      } else if (shift_amount > 31) {
+        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
+        LoadConstant(rl_result.low_reg, 0);
+      } else {
+        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
+                         EncodeShift(kArmLsr, 32 - shift_amount));
+        OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
+      }
+      break;
+    case Instruction::SHR_LONG:
+    case Instruction::SHR_LONG_2ADDR:
+      if (shift_amount == 32) {
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+      } else if (shift_amount > 31) {
+        OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+      } else {
+        int t_reg = AllocTemp();
+        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+                         EncodeShift(kArmLsl, 32 - shift_amount));
+        FreeTemp(t_reg);
+        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+      }
+      break;
+    case Instruction::USHR_LONG:
+    case Instruction::USHR_LONG_2ADDR:
+      if (shift_amount == 32) {
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        LoadConstant(rl_result.high_reg, 0);
+      } else if (shift_amount > 31) {
+        OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+        LoadConstant(rl_result.high_reg, 0);
+      } else {
+        int t_reg = AllocTemp();
+        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+                         EncodeShift(kArmLsl, 32 - shift_amount));
+        FreeTemp(t_reg);
+        OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+      }
+      break;
+    default:
+      LOG(FATAL) << "Unexpected case";
+  }
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
+    if (!rl_src2.is_const) {
+      // Don't bother with special handling for subtract from immediate.
+      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    }
+  } else {
+    // Normalize
+    if (!rl_src2.is_const) {
+      DCHECK(rl_src1.is_const);
+      RegLocation rl_temp = rl_src1;
+      rl_src1 = rl_src2;
+      rl_src2 = rl_temp;
+    }
+  }
+  if (BadOverlap(rl_src1, rl_dest)) {
+    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+    return;
+  }
+  DCHECK(rl_src2.is_const);
+  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+  uint32_t val_lo = Low32Bits(val);
+  uint32_t val_hi = High32Bits(val);
+  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
+  int32_t mod_imm_hi = ModifiedImmediate(val_hi);
+
+  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
+  switch (opcode) {
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
+        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+        return;
+      }
+      break;
+    default:
+      break;
+  }
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
+  switch (opcode) {
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      NewLIR3(kThumb2AddRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+      NewLIR3(kThumb2AdcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+      break;
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
+        OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
+      }
+      if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
+        OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
+      }
+      break;
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
+      OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
+      break;
+    case Instruction::AND_LONG:
+    case Instruction::AND_LONG_2ADDR:
+      if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
+        OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
+      }
+      if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
+        OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
+      }
+      break;
+    case Instruction::SUB_LONG_2ADDR:
+    case Instruction::SUB_LONG:
+      NewLIR3(kThumb2SubRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+      NewLIR3(kThumb2SbcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected opcode " << opcode;
+  }
+  StoreValueWide(rl_dest, rl_result);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
new file mode 100644
index 0000000..4bece13
--- /dev/null
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
+
+namespace art {
+
+static int core_regs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
+                         r11, r12, rARM_SP, rARM_LR, rARM_PC};
+static int ReservedRegs[] = {rARM_SUSPEND, rARM_SELF, rARM_SP, rARM_LR, rARM_PC};
+static int FpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+                       fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15,
+                       fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23,
+                       fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31};
+static int core_temps[] = {r0, r1, r2, r3, r12};
+static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+                        fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
+
+RegLocation ArmMir2Lir::LocCReturn()
+{
+  RegLocation res = ARM_LOC_C_RETURN;
+  return res;
+}
+
+RegLocation ArmMir2Lir::LocCReturnWide()
+{
+  RegLocation res = ARM_LOC_C_RETURN_WIDE;
+  return res;
+}
+
+RegLocation ArmMir2Lir::LocCReturnFloat()
+{
+  RegLocation res = ARM_LOC_C_RETURN_FLOAT;
+  return res;
+}
+
+RegLocation ArmMir2Lir::LocCReturnDouble()
+{
+  RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
+  return res;
+}
+
+// Return a target-dependent special register.
+int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
+  int res = INVALID_REG;
+  switch (reg) {
+    case kSelf: res = rARM_SELF; break;
+    case kSuspend: res =  rARM_SUSPEND; break;
+    case kLr: res =  rARM_LR; break;
+    case kPc: res =  rARM_PC; break;
+    case kSp: res =  rARM_SP; break;
+    case kArg0: res = rARM_ARG0; break;
+    case kArg1: res = rARM_ARG1; break;
+    case kArg2: res = rARM_ARG2; break;
+    case kArg3: res = rARM_ARG3; break;
+    case kFArg0: res = rARM_FARG0; break;
+    case kFArg1: res = rARM_FARG1; break;
+    case kFArg2: res = rARM_FARG2; break;
+    case kFArg3: res = rARM_FARG3; break;
+    case kRet0: res = rARM_RET0; break;
+    case kRet1: res = rARM_RET1; break;
+    case kInvokeTgt: res = rARM_INVOKE_TGT; break;
+    case kCount: res = rARM_COUNT; break;
+  }
+  return res;
+}
+
+
+// Create a double from a pair of singles.
+int ArmMir2Lir::S2d(int low_reg, int high_reg)
+{
+  return ARM_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t ArmMir2Lir::FpRegMask()
+{
+  return ARM_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool ArmMir2Lir::SameRegType(int reg1, int reg2)
+{
+  return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
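+ * Core registers map to single bits; FP registers start at bit kArmFPReg0, and a
+ * double register covers two adjacent single-precision bits.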
+ */
+uint64_t ArmMir2Lir::GetRegMaskCommon(int reg)
+{
+  uint64_t seed;
+  int shift;
+  int reg_id;
+
+  reg_id = reg & 0x1f;
+  /* Each double register is equal to a pair of single-precision FP registers */
+  seed = ARM_DOUBLEREG(reg) ? 3 : 1;
+  /* FP register starts at bit position 16 */
+  shift = ARM_FPREG(reg) ? kArmFPReg0 : 0;
+  /* Expand the double register id into single offset */
+  shift += reg_id;
+  return (seed << shift);
+}
+
+uint64_t ArmMir2Lir::GetPCUseDefEncoding()
+{
+  return ENCODE_ARM_REG_PC;
+}
+
+void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir)
+{
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
+
+  // Thumb2 specific setup
+  uint64_t flags = ArmMir2Lir::EncodingMap[lir->opcode].flags;
+  int opcode = lir->opcode;
+
+  if (flags & REG_DEF_SP) {
+    lir->def_mask |= ENCODE_ARM_REG_SP;
+  }
+
+  if (flags & REG_USE_SP) {
+    lir->use_mask |= ENCODE_ARM_REG_SP;
+  }
+
+  if (flags & REG_DEF_LIST0) {
+    lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+  }
+
+  if (flags & REG_DEF_LIST1) {
+    lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+  }
+
+  if (flags & REG_DEF_FPCS_LIST0) {
+    lir->def_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+  }
+
+  if (flags & REG_DEF_FPCS_LIST2) {
+    for (int i = 0; i < lir->operands[2]; i++) {
+      SetupRegMask(&lir->def_mask, lir->operands[1] + i);
+    }
+  }
+
+  if (flags & REG_USE_PC) {
+    lir->use_mask |= ENCODE_ARM_REG_PC;
+  }
+
+  /* Conservatively treat the IT block */
+  if (flags & IS_IT) {
+    lir->def_mask = ENCODE_ALL;
+  }
+
+  if (flags & REG_USE_LIST0) {
+    lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+  }
+
+  if (flags & REG_USE_LIST1) {
+    lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+  }
+
+  if (flags & REG_USE_FPCS_LIST0) {
+    lir->use_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+  }
+
+  if (flags & REG_USE_FPCS_LIST2) {
+    for (int i = 0; i < lir->operands[2]; i++) {
+      SetupRegMask(&lir->use_mask, lir->operands[1] + i);
+    }
+  }
+  /* Fixup for kThumbPush/lr and kThumbPop/pc */
+  if (opcode == kThumbPush || opcode == kThumbPop) {
+    uint64_t r8Mask = GetRegMaskCommon(r8);
+    if ((opcode == kThumbPush) && (lir->use_mask & r8Mask)) {
+      lir->use_mask &= ~r8Mask;
+      lir->use_mask |= ENCODE_ARM_REG_LR;
+    } else if ((opcode == kThumbPop) && (lir->def_mask & r8Mask)) {
+      lir->def_mask &= ~r8Mask;
+      lir->def_mask |= ENCODE_ARM_REG_PC;
+    }
+  }
+  if (flags & REG_DEF_LR) {
+    lir->def_mask |= ENCODE_ARM_REG_LR;
+  }
+}
+
+ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode)
+{
+  ArmConditionCode res;
+  switch (ccode) {
+    case kCondEq: res = kArmCondEq; break;
+    case kCondNe: res = kArmCondNe; break;
+    case kCondCs: res = kArmCondCs; break;
+    case kCondCc: res = kArmCondCc; break;
+    case kCondMi: res = kArmCondMi; break;
+    case kCondPl: res = kArmCondPl; break;
+    case kCondVs: res = kArmCondVs; break;
+    case kCondVc: res = kArmCondVc; break;
+    case kCondHi: res = kArmCondHi; break;
+    case kCondLs: res = kArmCondLs; break;
+    case kCondGe: res = kArmCondGe; break;
+    case kCondLt: res = kArmCondLt; break;
+    case kCondGt: res = kArmCondGt; break;
+    case kCondLe: res = kArmCondLe; break;
+    case kCondAl: res = kArmCondAl; break;
+    case kCondNv: res = kArmCondNv; break;
+    default:
+      LOG(FATAL) << "Bad condition code " << ccode;
+      res = static_cast<ArmConditionCode>(0);  // Quiet gcc
+  }
+  return res;
+}
+
+static const char* core_reg_names[16] = {
+  "r0",
+  "r1",
+  "r2",
+  "r3",
+  "r4",
+  "r5",
+  "r6",
+  "r7",
+  "r8",
+  "rSELF",
+  "r10",
+  "r11",
+  "r12",
+  "sp",
+  "lr",
+  "pc",
+};
+
+
+static const char* shift_names[4] = {
+  "lsl",
+  "lsr",
+  "asr",
+  "ror"};
+
+/* Decode and print an ARM register list */
+static char* DecodeRegList(int opcode, int vector, char* buf)
+{
+  int i;
+  bool printed = false;
+  buf[0] = 0;
+  for (i = 0; i < 16; i++, vector >>= 1) {
+    if (vector & 0x1) {
+      int reg_id = i;
+      if (opcode == kThumbPush && i == 8) {
+        reg_id = r14lr;
+      } else if (opcode == kThumbPop && i == 8) {
+        reg_id = r15pc;
+      }
+      if (printed) {
+        sprintf(buf + strlen(buf), ", r%d", reg_id);
+      } else {
+        printed = true;
+        sprintf(buf, "r%d", reg_id);
+      }
+    }
+  }
+  return buf;
+}
+
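+/* Print a contiguous list of single-precision FP registers, s<base> .. s<base+count-1> */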
+static char* DecodeFPCSRegList(int count, int base, char* buf)
+{
+  sprintf(buf, "s%d", base);
+  for (int i = 1; i < count; i++) {
+    sprintf(buf + strlen(buf), ", s%d", base + i);
+  }
+  return buf;
+}
+
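+/* Expand a Thumb2 modified-immediate encoding into its 32-bit value (for disassembly) */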
+static int ExpandImmediate(int value)
+{
+  int mode = (value & 0xf00) >> 8;
+  uint32_t bits = value & 0xff;
+  switch (mode) {
+    case 0:
+      return bits;
+     case 1:
+      return (bits << 16) | bits;
+     case 2:
+      return (bits << 24) | (bits << 8);
+     case 3:
+      return (bits << 24) | (bits << 16) | (bits << 8) | bits;
+    default:
+      break;
+  }
+  bits = (bits | 0x80) << 24;
+  return bits >> (((value & 0xf80) >> 7) - 8);
+}
+
+const char* cc_names[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
+                         "hi","ls","ge","lt","gt","le","al","nv"};
+/*
+ * Interpret a format string and build the decoded instruction string.
+ * See the format key in assemble_arm.cc.
+ */
+std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
+{
+  std::string buf;
+  int i;
+  const char* fmt_end = &fmt[strlen(fmt)];
+  char tbuf[256];
+  const char* name;
+  char nc;
+  while (fmt < fmt_end) {
+    int operand;
+    if (*fmt == '!') {
+      fmt++;
+      DCHECK_LT(fmt, fmt_end);
+      nc = *fmt++;
+      if (nc == '!') {
+        strcpy(tbuf, "!");
+      } else {
+         DCHECK_LT(fmt, fmt_end);
+         DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
+         operand = lir->operands[nc-'0'];
+         switch (*fmt++) {
+           case 'H':
+             if (operand != 0) {
+               sprintf(tbuf, ", %s %d",shift_names[operand & 0x3], operand >> 2);
+             } else {
+               strcpy(tbuf,"");
+             }
+             break;
+           case 'B':
+             switch (operand) {
+               case kSY:
+                 name = "sy";
+                 break;
+               case kST:
+                 name = "st";
+                 break;
+               case kISH:
+                 name = "ish";
+                 break;
+               case kISHST:
+                 name = "ishst";
+                 break;
+               case kNSH:
+                 name = "nsh";
+                 break;
+               case kNSHST:
+                 name = "shst";
+                 break;
+               default:
+                 name = "DecodeError2";
+                 break;
+             }
+             strcpy(tbuf, name);
+             break;
+           case 'b':
+             strcpy(tbuf,"0000");
+             for (i=3; i>= 0; i--) {
+               tbuf[i] += operand & 1;
+               operand >>= 1;
+             }
+             break;
+           case 'n':
+             operand = ~ExpandImmediate(operand);
+             sprintf(tbuf,"%d [%#x]", operand, operand);
+             break;
+           case 'm':
+             operand = ExpandImmediate(operand);
+             sprintf(tbuf,"%d [%#x]", operand, operand);
+             break;
+           case 's':
+             sprintf(tbuf,"s%d",operand & ARM_FP_REG_MASK);
+             break;
+           case 'S':
+             sprintf(tbuf,"d%d",(operand & ARM_FP_REG_MASK) >> 1);
+             break;
+           case 'h':
+             sprintf(tbuf,"%04x", operand);
+             break;
+           case 'M':
+           case 'd':
+             sprintf(tbuf,"%d", operand);
+             break;
+           case 'C':
+             DCHECK_LT(operand, static_cast<int>(
+                 sizeof(core_reg_names)/sizeof(core_reg_names[0])));
+             sprintf(tbuf,"%s",core_reg_names[operand]);
+             break;
+           case 'E':
+             sprintf(tbuf,"%d", operand*4);
+             break;
+           case 'F':
+             sprintf(tbuf,"%d", operand*2);
+             break;
+           case 'c':
+             strcpy(tbuf, cc_names[operand]);
+             break;
+           case 't':
+             sprintf(tbuf,"0x%08x (L%p)",
+                 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
+                 (operand << 1),
+                 lir->target);
+             break;
+           case 'u': {
+             int offset_1 = lir->operands[0];
+             int offset_2 = NEXT_LIR(lir)->operands[0];
+             uintptr_t target =
+                 (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) &
+                 ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
+                 0xfffffffc;
+             sprintf(tbuf, "%p", reinterpret_cast<void *>(target));
+             break;
+          }
+
+           /* Nothing to print for BLX_2 */
+           case 'v':
+             strcpy(tbuf, "see above");
+             break;
+           case 'R':
+             DecodeRegList(lir->opcode, operand, tbuf);
+             break;
+           case 'P':
+             DecodeFPCSRegList(operand, 16, tbuf);
+             break;
+           case 'Q':
+             DecodeFPCSRegList(operand, 0, tbuf);
+             break;
+           default:
+             strcpy(tbuf,"DecodeError1");
+             break;
+        }
+        buf += tbuf;
+      }
+    } else {
+       buf += *fmt++;
+    }
+  }
+  return buf;
+}
+
+void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
+{
+  char buf[256];
+  buf[0] = 0;
+
+  if (mask == ENCODE_ALL) {
+    strcpy(buf, "all");
+  } else {
+    char num[8];
+    int i;
+
+    for (i = 0; i < kArmRegEnd; i++) {
+      if (mask & (1ULL << i)) {
+        sprintf(num, "%d ", i);
+        strcat(buf, num);
+      }
+    }
+
+    if (mask & ENCODE_CCODE) {
+      strcat(buf, "cc ");
+    }
+    if (mask & ENCODE_FP_STATUS) {
+      strcat(buf, "fpcc ");
+    }
+
+    /* Memory bits */
+    if (arm_lir && (mask & ENCODE_DALVIK_REG)) {
+      sprintf(buf + strlen(buf), "dr%d%s", arm_lir->alias_info & 0xffff,
+              (arm_lir->alias_info & 0x80000000) ? "(+1)" : "");
+    }
+    if (mask & ENCODE_LITERAL) {
+      strcat(buf, "lit ");
+    }
+
+    if (mask & ENCODE_HEAP_REF) {
+      strcat(buf, "heap ");
+    }
+    if (mask & ENCODE_MUST_NOT_ALIAS) {
+      strcat(buf, "noalias ");
+    }
+  }
+  if (buf[0]) {
+    LOG(INFO) << prefix << ": " << buf;
+  }
+}
+
+bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir)
+{
+  return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
+}
+
+ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
+  // Sanity check - make sure encoding map lines up.
+  for (int i = 0; i < kArmLast; i++) {
+    if (ArmMir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
+                 << " is wrong: expecting " << i << ", seeing "
+                 << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
+    }
+  }
+}
+
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena) {
+  return new ArmMir2Lir(cu, mir_graph, arena);
+}
+
+/*
+ * Alloc a pair of core registers, or a double.  Low reg in low byte,
+ * high reg in next byte.
+ */
+int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class)
+{
+  int high_reg;
+  int low_reg;
+  int res = 0;
+
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    low_reg = AllocTempDouble();
+    high_reg = low_reg + 1;
+  } else {
+    low_reg = AllocTemp();
+    high_reg = AllocTemp();
+  }
+  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+  return res;
+}
+
+int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class)
+{
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
+    return AllocTempFloat();
+  return AllocTemp();
+}
+
+void ArmMir2Lir::CompilerInitializeRegAlloc()
+{
+  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
+      (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                     ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs = static_cast<RegisterInfo*>
+      (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                      ArenaAllocator::kAllocRegAlloc));
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
+  // Keep special registers from being allocated
+  for (int i = 0; i < num_reserved; i++) {
+    if (NO_SUSPEND && (ReservedRegs[i] == rARM_SUSPEND)) {
+      // Don't reserve the suspend register when measuring the cost of suspend checks.
+      continue;
+    }
+    MarkInUse(ReservedRegs[i]);
+  }
+  // Mark temp regs - all others not in use can be used for promotion
+  for (int i = 0; i < num_temps; i++) {
+    MarkTemp(core_temps[i]);
+  }
+  for (int i = 0; i < num_fp_temps; i++) {
+    MarkTemp(fp_temps[i]);
+  }
+
+  // Start allocation at r2 in an attempt to avoid clobbering return values
+  reg_pool_->next_core_reg = r2;
+}
+
+void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
+                     RegLocation rl_free)
+{
+  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+    (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+    // No overlap, free both
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
+  }
+}
+/*
+ * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine as some
+ * instructions might call out to C/assembly helper functions.  Until
+ * machinery is in place, always spill lr.
+ */
+
+void ArmMir2Lir::AdjustSpillMask()
+{
+  core_spill_mask_ |= (1 << rARM_LR);
+  num_core_spills_++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted.  Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask.  Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
+void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg)
+{
+  DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
+  reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
+  // Ensure fp_vmap_table is large enough
+  int table_size = fp_vmap_table_.size();
+  for (int i = table_size; i < (reg + 1); i++) {
+    fp_vmap_table_.push_back(INVALID_VREG);
+  }
+  // Add the current mapping
+  fp_vmap_table_[reg] = v_reg;
+  // Size of fp_vmap_table is high-water mark, use to set mask
+  num_fp_spills_ = fp_vmap_table_.size();
+  fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
+}
+
+void ArmMir2Lir::FlushRegWide(int reg1, int reg2)
+{
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
+  DCHECK(info1 && info2 && info1->pair && info2->pair &&
+       (info1->partner == info2->reg) &&
+       (info2->partner == info1->reg));
+  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+    if (!(info1->is_temp && info2->is_temp)) {
+      /* Should not happen.  If it does, there's a problem in eval_loc */
+      LOG(FATAL) << "Long half-temp, half-promoted";
+    }
+
+    info1->dirty = false;
+    info2->dirty = false;
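+    // Store the pair to the home location of the lower-numbered Dalvik vreg.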
+    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg)) {
+      info1 = info2;
+    }
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rARM_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+  }
+}
+
+void ArmMir2Lir::FlushReg(int reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  if (info->live && info->dirty) {
+    info->dirty = false;
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rARM_SP, VRegOffset(v_reg), reg, kWord);
+  }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool ArmMir2Lir::IsFpReg(int reg) {
+  return ARM_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void ArmMir2Lir::ClobberCalleeSave()
+{
+  Clobber(r0);
+  Clobber(r1);
+  Clobber(r2);
+  Clobber(r3);
+  Clobber(r12);
+  Clobber(r14lr);
+  Clobber(fr0);
+  Clobber(fr1);
+  Clobber(fr2);
+  Clobber(fr3);
+  Clobber(fr4);
+  Clobber(fr5);
+  Clobber(fr6);
+  Clobber(fr7);
+  Clobber(fr8);
+  Clobber(fr9);
+  Clobber(fr10);
+  Clobber(fr11);
+  Clobber(fr12);
+  Clobber(fr13);
+  Clobber(fr14);
+  Clobber(fr15);
+}
+
+RegLocation ArmMir2Lir::GetReturnWideAlt()
+{
+  RegLocation res = LocCReturnWide();
+  res.low_reg = r2;
+  res.high_reg = r3;
+  Clobber(r2);
+  Clobber(r3);
+  MarkInUse(r2);
+  MarkInUse(r3);
+  MarkPair(res.low_reg, res.high_reg);
+  return res;
+}
+
+RegLocation ArmMir2Lir::GetReturnAlt()
+{
+  RegLocation res = LocCReturn();
+  res.low_reg = r1;
+  Clobber(r1);
+  MarkInUse(r1);
+  return res;
+}
+
+ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg)
+{
+  return ARM_FPREG(reg) ? &reg_pool_->FPRegs[reg & ARM_FP_REG_MASK]
+      : &reg_pool_->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void ArmMir2Lir::LockCallTemps()
+{
+  LockTemp(r0);
+  LockTemp(r1);
+  LockTemp(r2);
+  LockTemp(r3);
+}
+
+/* To be used when explicitly managing register use */
+void ArmMir2Lir::FreeCallTemps()
+{
+  FreeTemp(r0);
+  FreeTemp(r1);
+  FreeTemp(r2);
+  FreeTemp(r3);
+}
+
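+/* Load a helper entrypoint (Thread-relative offset) into rARM_LR and return it as the call target */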
+int ArmMir2Lir::LoadHelper(int offset)
+{
+  LoadWordDisp(rARM_SELF, offset, rARM_LR);
+  return rARM_LR;
+}
+
+uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode)
+{
+  return ArmMir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* ArmMir2Lir::GetTargetInstName(int opcode)
+{
+  return ArmMir2Lir::EncodingMap[opcode].name;
+}
+
+const char* ArmMir2Lir::GetTargetInstFmt(int opcode)
+{
+  return ArmMir2Lir::EncodingMap[opcode].fmt;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
new file mode 100644
index 0000000..abf921f
--- /dev/null
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -0,0 +1,1093 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "dex/quick/mir_to_lir-inl.h"
+
+namespace art {
+
+/* This file contains codegen for the Thumb ISA. */
+
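+/*
+ * Determine whether value can be encoded as a Thumb2 single-precision floating
+ * point immediate.  If not, return -1.  If so, return the encoded 8-bit value.
+ */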
+static int EncodeImmSingle(int value)
+{
+  int res;
+  int bit_a =  (value & 0x80000000) >> 31;
+  int not_bit_b = (value & 0x40000000) >> 30;
+  int bit_b =  (value & 0x20000000) >> 29;
+  int b_smear =  (value & 0x3e000000) >> 25;
+  int slice =   (value & 0x01f80000) >> 19;
+  int zeroes =  (value & 0x0007ffff);
+  if (zeroes != 0)
+    return -1;
+  if (bit_b) {
+    if ((not_bit_b != 0) || (b_smear != 0x1f))
+      return -1;
+  } else {
+    if ((not_bit_b != 1) || (b_smear != 0x0))
+      return -1;
+  }
+  res = (bit_a << 7) | (bit_b << 6) | slice;
+  return res;
+}
+
+/*
+ * Determine whether value can be encoded as a Thumb2 floating point
+ * immediate.  If not, return -1.  If so return encoded 8-bit value.
+ */
+static int EncodeImmDouble(int64_t value)
+{
+  int res;
+  int bit_a = (value & 0x8000000000000000ll) >> 63;
+  int not_bit_b = (value & 0x4000000000000000ll) >> 62;
+  int bit_b = (value & 0x2000000000000000ll) >> 61;
+  int b_smear = (value & 0x3fc0000000000000ll) >> 54;
+  int slice =  (value & 0x003f000000000000ll) >> 48;
+  uint64_t zeroes = (value & 0x0000ffffffffffffll);
+  if (zeroes != 0)
+    return -1;
+  if (bit_b) {
+    if ((not_bit_b != 0) || (b_smear != 0xff))
+      return -1;
+  } else {
+    if ((not_bit_b != 1) || (b_smear != 0x0))
+      return -1;
+  }
+  res = (bit_a << 7) | (bit_b << 6) | slice;
+  return res;
+}
+
+LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value)
+{
+  DCHECK(ARM_SINGLEREG(r_dest));
+  if (value == 0) {
+    // TODO: we need better info about the target CPU.  A vector exclusive-or
+    //       would probably be better here if we could rely on its existence.
+    // Load an immediate +2.0 (which encodes to 0)
+    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
+    // +0.0 = +2.0 - +2.0
+    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
+  } else {
+    int encoded_imm = EncodeImmSingle(value);
+    if (encoded_imm >= 0) {
+      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
+    }
+  }
+  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
+  if (data_target == NULL) {
+    data_target = AddWordData(&literal_list_, value);
+  }
+  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
+                          r_dest, r15pc, 0, 0, 0, data_target);
+  SetMemRefType(load_pc_rel, true, kLiteral);
+  load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+  AppendLIR(load_pc_rel);
+  return load_pc_rel;
+}
+
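+/* Count leading zero bits via binary search; returns 32 for val == 0 */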
+static int LeadingZeros(uint32_t val)
+{
+  uint32_t alt;
+  int n;
+  int count;
+
+  count = 16;
+  n = 32;
+  do {
+    alt = val >> count;
+    if (alt != 0) {
+      n = n - count;
+      val = alt;
+    }
+    count >>= 1;
+  } while (count);
+  return n - val;
+}
+
+/*
+ * Determine whether value can be encoded as a Thumb2 modified
+ * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
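+ * For example, 0x00AB00AB matches the (b0 << 16) | b0 pattern below and encodes
+ * as (0x1 << 8) | 0xAB.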
+ */
+int ArmMir2Lir::ModifiedImmediate(uint32_t value)
+{
+  int z_leading;
+  int z_trailing;
+  uint32_t b0 = value & 0xff;
+
+  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
+  if (value <= 0xFF)
+    return b0;  // 0:000:a:bcdefgh
+  if (value == ((b0 << 16) | b0))
+    return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
+  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
+    return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
+  b0 = (value >> 8) & 0xff;
+  if (value == ((b0 << 24) | (b0 << 8)))
+    return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
+  /* Can we do it with rotation? */
+  z_leading = LeadingZeros(value);
+  z_trailing = 32 - LeadingZeros(~value & (value - 1));
+  /* A run of eight or fewer active bits? */
+  if ((z_leading + z_trailing) < 24)
+    return -1;  /* No - bail */
+  /* Left-justify the constant, discarding msb (known to be 1) */
+  value <<= z_leading + 1;
+  /* Create bcdefgh */
+  value >>= 25;
+  /* Put it all together */
+  return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
+}
+
+bool ArmMir2Lir::InexpensiveConstantInt(int32_t value)
+{
+  return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+}
+
+bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value)
+{
+  return EncodeImmSingle(value) >= 0;
+}
+
+bool ArmMir2Lir::InexpensiveConstantLong(int64_t value)
+{
+  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
+}
+
+bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value)
+{
+  return EncodeImmDouble(value) >= 0;
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value)
+{
+  LIR* res;
+  int mod_imm;
+
+  if (ARM_FPREG(r_dest)) {
+    return LoadFPConstantValue(r_dest, value);
+  }
+
+  /* See if the value can be constructed cheaply */
+  if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
+    return NewLIR2(kThumbMovImm, r_dest, value);
+  }
+  /* Check Modified immediate special cases */
+  mod_imm = ModifiedImmediate(value);
+  if (mod_imm >= 0) {
+    res = NewLIR2(kThumb2MovImmShift, r_dest, mod_imm);
+    return res;
+  }
+  mod_imm = ModifiedImmediate(~value);
+  if (mod_imm >= 0) {
+    res = NewLIR2(kThumb2MvnImm12, r_dest, mod_imm);
+    return res;
+  }
+  /* 16-bit immediate? */
+  if ((value & 0xffff) == value) {
+    res = NewLIR2(kThumb2MovImm16, r_dest, value);
+    return res;
+  }
+  /* Do a low/high pair */
+  res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
+  NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
+  return res;
+}
+
+LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target)
+{
+  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
+  res->target = target;
+  return res;
+}
+
+LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
+{
+  LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
+                        ArmConditionEncoding(cc));
+  branch->target = target;
+  return branch;
+}
+
+LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src)
+{
+  ArmOpcode opcode = kThumbBkpt;
+  switch (op) {
+    case kOpBlx:
+      opcode = kThumbBlxR;
+      break;
+    default:
+      LOG(FATAL) << "Bad opcode " << op;
+  }
+  return NewLIR1(opcode, r_dest_src);
+}
+
+LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
+                               int shift)
+{
+  bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
+  ArmOpcode opcode = kThumbBkpt;
+  switch (op) {
+    case kOpAdc:
+      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
+      break;
+    case kOpAnd:
+      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
+      break;
+    case kOpBic:
+      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
+      break;
+    case kOpCmn:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
+      break;
+    case kOpCmp:
+      if (thumb_form)
+        opcode = kThumbCmpRR;
+      else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+        opcode = kThumbCmpHH;
+      else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
+        opcode = kThumbCmpLH;
+      else if (shift == 0)
+        opcode = kThumbCmpHL;
+      else
+        opcode = kThumb2CmpRR;
+      break;
+    case kOpXor:
+      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
+      break;
+    case kOpMov:
+      DCHECK_EQ(shift, 0);
+      if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
+        opcode = kThumbMovRR;
+      else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+        opcode = kThumbMovRR_H2H;
+      else if (ARM_LOWREG(r_dest_src1))
+        opcode = kThumbMovRR_H2L;
+      else
+        opcode = kThumbMovRR_L2H;
+      break;
+    case kOpMul:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
+      break;
+    case kOpMvn:
+      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
+      break;
+    case kOpNeg:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
+      break;
+    case kOpOr:
+      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
+      break;
+    case kOpSbc:
+      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
+      break;
+    case kOpTst:
+      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
+      break;
+    case kOpLsl:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
+      break;
+    case kOpLsr:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
+      break;
+    case kOpAsr:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
+      break;
+    case kOpRor:
+      DCHECK_EQ(shift, 0);
+      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
+      break;
+    case kOpAdd:
+      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
+      break;
+    case kOpSub:
+      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
+      break;
+    case kOp2Byte:
+      DCHECK_EQ(shift, 0);
+      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
+    case kOp2Short:
+      DCHECK_EQ(shift, 0);
+      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
+    case kOp2Char:
+      DCHECK_EQ(shift, 0);
+      return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+      break;
+  }
+  DCHECK_GE(static_cast<int>(opcode), 0);
+  if (EncodingMap[opcode].flags & IS_BINARY_OP)
+    return NewLIR2(opcode, r_dest_src1, r_src2);
+  else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
+    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift)
+      return NewLIR3(opcode, r_dest_src1, r_src2, shift);
+    else
+      return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
+  } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
+    return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
+  else {
+    LOG(FATAL) << "Unexpected encoding operand count";
+    return NULL;
+  }
+}
+
+LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
+{
+  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
+}
+
+LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
+                                  int r_src2, int shift)
+{
+  ArmOpcode opcode = kThumbBkpt;
+  bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
+      ARM_LOWREG(r_src2);
+  switch (op) {
+    case kOpAdd:
+      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
+      break;
+    case kOpSub:
+      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
+      break;
+    case kOpRsub:
+      opcode = kThumb2RsubRRR;
+      break;
+    case kOpAdc:
+      opcode = kThumb2AdcRRR;
+      break;
+    case kOpAnd:
+      opcode = kThumb2AndRRR;
+      break;
+    case kOpBic:
+      opcode = kThumb2BicRRR;
+      break;
+    case kOpXor:
+      opcode = kThumb2EorRRR;
+      break;
+    case kOpMul:
+      DCHECK_EQ(shift, 0);
+      opcode = kThumb2MulRRR;
+      break;
+    case kOpOr:
+      opcode = kThumb2OrrRRR;
+      break;
+    case kOpSbc:
+      opcode = kThumb2SbcRRR;
+      break;
+    case kOpLsl:
+      DCHECK_EQ(shift, 0);
+      opcode = kThumb2LslRRR;
+      break;
+    case kOpLsr:
+      DCHECK_EQ(shift, 0);
+      opcode = kThumb2LsrRRR;
+      break;
+    case kOpAsr:
+      DCHECK_EQ(shift, 0);
+      opcode = kThumb2AsrRRR;
+      break;
+    case kOpRor:
+      DCHECK_EQ(shift, 0);
+      opcode = kThumb2RorRRR;
+      break;
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+      break;
+  }
+  DCHECK_GE(static_cast<int>(opcode), 0);
+  if (EncodingMap[opcode].flags & IS_QUAD_OP)
+    return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
+  else {
+    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
+    return NewLIR3(opcode, r_dest, r_src1, r_src2);
+  }
+}
+
+LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2)
+{
+  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
+}
+
+LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value)
+{
+  LIR* res;
+  bool neg = (value < 0);
+  int abs_value = (neg) ? -value : value;
+  ArmOpcode opcode = kThumbBkpt;
+  ArmOpcode alt_opcode = kThumbBkpt;
+  bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
+  int mod_imm = ModifiedImmediate(value);
+  int mod_imm_neg = ModifiedImmediate(-value);
+
+  switch (op) {
+    case kOpLsl:
+      if (all_low_regs)
+        return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
+      else
+        return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
+    case kOpLsr:
+      if (all_low_regs)
+        return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
+      else
+        return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
+    case kOpAsr:
+      if (all_low_regs)
+        return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
+      else
+        return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
+    case kOpRor:
+      return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
+    case kOpAdd:
+      if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
+        (value <= 1020) && ((value & 0x3)==0)) {
+        return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
+      } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
+          (value <= 1020) && ((value & 0x3)==0)) {
+        return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
+      }
+      // Note: intentional fallthrough
+    case kOpSub:
+      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
+        if (op == kOpAdd)
+          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
+        else
+          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
+        return NewLIR3(opcode, r_dest, r_src1, abs_value);
+      } else if ((abs_value & 0xff) == abs_value) {
+        if (op == kOpAdd)
+          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
+        else
+          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
+        return NewLIR3(opcode, r_dest, r_src1, abs_value);
+      }
+      if (mod_imm_neg >= 0) {
+        op = (op == kOpAdd) ? kOpSub : kOpAdd;
+        mod_imm = mod_imm_neg;
+      }
+      if (op == kOpSub) {
+        opcode = kThumb2SubRRI8;
+        alt_opcode = kThumb2SubRRR;
+      } else {
+        opcode = kThumb2AddRRI8;
+        alt_opcode = kThumb2AddRRR;
+      }
+      break;
+    case kOpRsub:
+      opcode = kThumb2RsubRRI8;
+      alt_opcode = kThumb2RsubRRR;
+      break;
+    case kOpAdc:
+      opcode = kThumb2AdcRRI8;
+      alt_opcode = kThumb2AdcRRR;
+      break;
+    case kOpSbc:
+      opcode = kThumb2SbcRRI8;
+      alt_opcode = kThumb2SbcRRR;
+      break;
+    case kOpOr:
+      opcode = kThumb2OrrRRI8;
+      alt_opcode = kThumb2OrrRRR;
+      break;
+    case kOpAnd:
+      opcode = kThumb2AndRRI8;
+      alt_opcode = kThumb2AndRRR;
+      break;
+    case kOpXor:
+      opcode = kThumb2EorRRI8;
+      alt_opcode = kThumb2EorRRR;
+      break;
+    case kOpMul:
+      // TUNING: power of 2, shift & add
+      mod_imm = -1;
+      alt_opcode = kThumb2MulRRR;
+      break;
+    case kOpCmp: {
+      int mod_imm = ModifiedImmediate(value);
+      LIR* res;
+      if (mod_imm >= 0) {
+        res = NewLIR2(kThumb2CmpRI12, r_src1, mod_imm);
+      } else {
+        int r_tmp = AllocTemp();
+        res = LoadConstant(r_tmp, value);
+        OpRegReg(kOpCmp, r_src1, r_tmp);
+        FreeTemp(r_tmp);
+      }
+      return res;
+    }
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+  }
+
+  if (mod_imm >= 0) {
+    return NewLIR3(opcode, r_dest, r_src1, mod_imm);
+  } else {
+    int r_scratch = AllocTemp();
+    LoadConstant(r_scratch, value);
+    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+      res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
+    else
+      res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
+    FreeTemp(r_scratch);
+    return res;
+  }
+}
+
+/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
+LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value)
+{
+  bool neg = (value < 0);
+  int abs_value = (neg) ? -value : value;
+  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
+  ArmOpcode opcode = kThumbBkpt;
+  switch (op) {
+    case kOpAdd:
+      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+        DCHECK_EQ((value & 0x3), 0);
+        return NewLIR1(kThumbAddSpI7, value >> 2);
+      } else if (short_form) {
+        opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
+      }
+      break;
+    case kOpSub:
+      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+        DCHECK_EQ((value & 0x3), 0);
+        return NewLIR1(kThumbSubSpI7, value >> 2);
+      } else if (short_form) {
+        opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
+      }
+      break;
+    case kOpCmp:
+      if (ARM_LOWREG(r_dest_src1) && short_form)
+        opcode = (short_form) ? kThumbCmpRI8 : kThumbCmpRR;
+      else if (ARM_LOWREG(r_dest_src1))
+        opcode = kThumbCmpRR;
+      else {
+        short_form = false;
+        opcode = kThumbCmpHL;
+      }
+      break;
+    default:
+      /* Punt to OpRegRegImm - if bad case catch it there */
+      short_form = false;
+      break;
+  }
+  if (short_form)
+    return NewLIR2(opcode, r_dest_src1, abs_value);
+  else {
+    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+  }
+}
+
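+/*
+ * Load a 64-bit constant into a core register pair or a double FP register,
+ * using an immediate form when possible and the literal pool otherwise.
+ */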
+LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
+{
+  LIR* res = NULL;
+  int32_t val_lo = Low32Bits(value);
+  int32_t val_hi = High32Bits(value);
+  int target_reg = S2d(r_dest_lo, r_dest_hi);
+  if (ARM_FPREG(r_dest_lo)) {
+    if ((val_lo == 0) && (val_hi == 0)) {
+      // TODO: we need better info about the target CPU.  A vector exclusive-or
+      //       would probably be better here if we could rely on its existence.
+      // Load an immediate +2.0 (which encodes to 0)
+      NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
+      // +0.0 = +2.0 - +2.0
+      res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
+    } else {
+      int encoded_imm = EncodeImmDouble(value);
+      if (encoded_imm >= 0) {
+        res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
+      }
+    }
+  } else {
+    if (InexpensiveConstantInt(val_lo) && InexpensiveConstantInt(val_hi)) {
+      res = LoadConstantNoClobber(r_dest_lo, val_lo);
+      LoadConstantNoClobber(r_dest_hi, val_hi);
+    }
+  }
+  if (res == NULL) {
+    // No short form - load from the literal pool.
+    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
+    if (data_target == NULL) {
+      data_target = AddWideData(&literal_list_, val_lo, val_hi);
+    }
+    if (ARM_FPREG(r_dest_lo)) {
+      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
+                   target_reg, r15pc, 0, 0, 0, data_target);
+    } else {
+      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
+                   r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
+    }
+    SetMemRefType(res, true, kLiteral);
+    res->alias_info = reinterpret_cast<uintptr_t>(data_target);
+    AppendLIR(res);
+  }
+  return res;
+}
+
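+// Pack a shift type and amount into the operand form used by the *Shift ops:
+// bits [6:2] hold the amount, bits [1:0] the shift type (lsl/lsr/asr/ror).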
+int ArmMir2Lir::EncodeShift(int code, int amount) {
+  return ((amount & 0x1f) << 2) | code;
+}
+
+LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
+                                 int scale, OpSize size)
+{
+  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
+  LIR* load;
+  ArmOpcode opcode = kThumbBkpt;
+  bool thumb_form = (all_low_regs && (scale == 0));
+  int reg_ptr;
+
+  if (ARM_FPREG(r_dest)) {
+    if (ARM_SINGLEREG(r_dest)) {
+      DCHECK((size == kWord) || (size == kSingle));
+      opcode = kThumb2Vldrs;
+      size = kSingle;
+    } else {
+      DCHECK(ARM_DOUBLEREG(r_dest));
+      DCHECK((size == kLong) || (size == kDouble));
+      DCHECK_EQ((r_dest & 0x1), 0);
+      opcode = kThumb2Vldrd;
+      size = kDouble;
+    }
+  } else {
+    if (size == kSingle)
+      size = kWord;
+  }
+
+  switch (size) {
+    case kDouble: // fall-through
+    case kSingle:
+      reg_ptr = AllocTemp();
+      if (scale) {
+        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
+                EncodeShift(kArmLsl, scale));
+      } else {
+        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
+      }
+      load = NewLIR3(opcode, r_dest, reg_ptr, 0);
+      FreeTemp(reg_ptr);
+      return load;
+    case kWord:
+      opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
+      break;
+    case kUnsignedHalf:
+      opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
+      break;
+    case kSignedHalf:
+      opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
+      break;
+    case kUnsignedByte:
+      opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
+      break;
+    case kSignedByte:
+      opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
+      break;
+    default:
+      LOG(FATAL) << "Bad size: " << size;
+  }
+  if (thumb_form)
+    load = NewLIR3(opcode, r_dest, rBase, r_index);
+  else
+    load = NewLIR4(opcode, r_dest, rBase, r_index, scale);
+
+  return load;
+}
+
+LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+                                  int scale, OpSize size)
+{
+  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
+  LIR* store = NULL;
+  ArmOpcode opcode = kThumbBkpt;
+  bool thumb_form = (all_low_regs && (scale == 0));
+  int reg_ptr;
+
+  if (ARM_FPREG(r_src)) {
+    if (ARM_SINGLEREG(r_src)) {
+      DCHECK((size == kWord) || (size == kSingle));
+      opcode = kThumb2Vstrs;
+      size = kSingle;
+    } else {
+      DCHECK(ARM_DOUBLEREG(r_src));
+      DCHECK((size == kLong) || (size == kDouble));
+      DCHECK_EQ((r_src & 0x1), 0);
+      opcode = kThumb2Vstrd;
+      size = kDouble;
+    }
+  } else {
+    if (size == kSingle)
+      size = kWord;
+  }
+
+  switch (size) {
+    case kDouble: // fall-through
+    case kSingle:
+      reg_ptr = AllocTemp();
+      if (scale) {
+        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
+                EncodeShift(kArmLsl, scale));
+      } else {
+        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
+      }
+      store = NewLIR3(opcode, r_src, reg_ptr, 0);
+      FreeTemp(reg_ptr);
+      return store;
+    case kWord:
+      opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
+      break;
+    default:
+      LOG(FATAL) << "Bad size: " << size;
+  }
+  if (thumb_form)
+    store = NewLIR3(opcode, r_src, rBase, r_index);
+  else
+    store = NewLIR4(opcode, r_src, rBase, r_index, scale);
+
+  return store;
+}
+
+/*
+ * Load value from base + displacement.  Optionally perform null check
+ * on base (which must have an associated s_reg and MIR).  If not
+ * performing null check, incoming MIR can be null.
+ */
+LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
+                                  int r_dest_hi, OpSize size, int s_reg)
+{
+  LIR* load = NULL;
+  ArmOpcode opcode = kThumbBkpt;
+  bool short_form = false;
+  bool thumb2Form = (displacement < 4092 && displacement >= 0);
+  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
+  int encoded_disp = displacement;
+  bool is64bit = false;
+  bool already_generated = false;
+  switch (size) {
+    case kDouble:
+    case kLong:
+      is64bit = true;
+      if (ARM_FPREG(r_dest)) {
+        if (ARM_SINGLEREG(r_dest)) {
+          DCHECK(ARM_FPREG(r_dest_hi));
+          r_dest = S2d(r_dest, r_dest_hi);
+        }
+        opcode = kThumb2Vldrd;
+        if (displacement <= 1020) {
+          short_form = true;
+          encoded_disp >>= 2;
+        }
+        break;
+      } else {
+        if (displacement <= 1020) {
+          load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
+        } else {
+          load = LoadBaseDispBody(rBase, displacement, r_dest,
+                                 -1, kWord, s_reg);
+          LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
+                           -1, kWord, INVALID_SREG);
+        }
+        already_generated = true;
+      }
+    case kSingle:
+    case kWord:
+      if (ARM_FPREG(r_dest)) {
+        opcode = kThumb2Vldrs;
+        if (displacement <= 1020) {
+          short_form = true;
+          encoded_disp >>= 2;
+        }
+        break;
+      }
+      if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
+          (displacement <= 1020) && (displacement >= 0)) {
+        short_form = true;
+        encoded_disp >>= 2;
+        opcode = kThumbLdrPcRel;
+      } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
+          (displacement <= 1020) && (displacement >= 0)) {
+        short_form = true;
+        encoded_disp >>= 2;
+        opcode = kThumbLdrSpRel;
+      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+        DCHECK_EQ((displacement & 0x3), 0);
+        short_form = true;
+        encoded_disp >>= 2;
+        opcode = kThumbLdrRRI5;
+      } else if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2LdrRRI12;
+      }
+      break;
+    case kUnsignedHalf:
+      if (all_low_regs && displacement < 64 && displacement >= 0) {
+        DCHECK_EQ((displacement & 0x1), 0);
+        short_form = true;
+        encoded_disp >>= 1;
+        opcode = kThumbLdrhRRI5;
+      } else if (displacement < 4092 && displacement >= 0) {
+        short_form = true;
+        opcode = kThumb2LdrhRRI12;
+      }
+      break;
+    case kSignedHalf:
+      if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2LdrshRRI12;
+      }
+      break;
+    case kUnsignedByte:
+      if (all_low_regs && displacement < 32 && displacement >= 0) {
+        short_form = true;
+        opcode = kThumbLdrbRRI5;
+      } else if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2LdrbRRI12;
+      }
+      break;
+    case kSignedByte:
+      if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2LdrsbRRI12;
+      }
+      break;
+    default:
+      LOG(FATAL) << "Bad size: " << size;
+  }
+
+  if (!already_generated) {
+    if (short_form) {
+      load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
+    } else {
+      int reg_offset = AllocTemp();
+      LoadConstant(reg_offset, encoded_disp);
+      load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
+      FreeTemp(reg_offset);
+    }
+  }
+
+  // TODO: in future may need to differentiate Dalvik accesses w/ spills
+  if (rBase == rARM_SP) {
+    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
+  }
+  return load;
+}
+
+LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
+                              OpSize size, int s_reg)
+{
+  return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
+}
+
+LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
+                                  int r_dest_hi, int s_reg)
+{
+  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+
+LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
+                                   int r_src, int r_src_hi, OpSize size) {
+  LIR* store = NULL;
+  ArmOpcode opcode = kThumbBkpt;
+  bool short_form = false;
+  bool thumb2Form = (displacement < 4092 && displacement >= 0);
+  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
+  int encoded_disp = displacement;
+  bool is64bit = false;
+  bool already_generated = false;
+  switch (size) {
+    case kLong:
+    case kDouble:
+      is64bit = true;
+      if (!ARM_FPREG(r_src)) {
+        if (displacement <= 1020) {
+          store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
+        } else {
+          store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
+          StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
+        }
+        already_generated = true;
+      } else {
+        if (ARM_SINGLEREG(r_src)) {
+          DCHECK(ARM_FPREG(r_src_hi));
+          r_src = S2d(r_src, r_src_hi);
+        }
+        opcode = kThumb2Vstrd;
+        if (displacement <= 1020) {
+          short_form = true;
+          encoded_disp >>= 2;
+        }
+      }
+      break;
+    case kSingle:
+    case kWord:
+      if (ARM_FPREG(r_src)) {
+        DCHECK(ARM_SINGLEREG(r_src));
+        opcode = kThumb2Vstrs;
+        if (displacement <= 1020) {
+          short_form = true;
+          encoded_disp >>= 2;
+        }
+        break;
+      }
+      if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
+          (displacement <= 1020) && (displacement >= 0)) {
+        short_form = true;
+        encoded_disp >>= 2;
+        opcode = kThumbStrSpRel;
+      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+        DCHECK_EQ((displacement & 0x3), 0);
+        short_form = true;
+        encoded_disp >>= 2;
+        opcode = kThumbStrRRI5;
+      } else if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2StrRRI12;
+      }
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      if (all_low_regs && displacement < 64 && displacement >= 0) {
+        DCHECK_EQ((displacement & 0x1), 0);
+        short_form = true;
+        encoded_disp >>= 1;
+        opcode = kThumbStrhRRI5;
+      } else if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2StrhRRI12;
+      }
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      if (all_low_regs && displacement < 32 && displacement >= 0) {
+        short_form = true;
+        opcode = kThumbStrbRRI5;
+      } else if (thumb2Form) {
+        short_form = true;
+        opcode = kThumb2StrbRRI12;
+      }
+      break;
+    default:
+      LOG(FATAL) << "Bad size: " << size;
+  }
+  if (!already_generated) {
+    if (short_form) {
+      store = NewLIR3(opcode, r_src, rBase, encoded_disp);
+    } else {
+      int r_scratch = AllocTemp();
+      LoadConstant(r_scratch, encoded_disp);
+      store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
+      FreeTemp(r_scratch);
+    }
+  }
+
+  // TODO: In future, may need to differentiate Dalvik & spill accesses
+  if (rBase == rARM_SP) {
+    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
+  }
+  return store;
+}
+
+LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
+                               OpSize size)
+{
+  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
+}
+
+LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
+                                   int r_src_lo, int r_src_hi)
+{
+  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
+}
+
+LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src)
+{
+  int opcode;
+  DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
+  if (ARM_DOUBLEREG(r_dest)) {
+    opcode = kThumb2Vmovd;
+  } else {
+    if (ARM_SINGLEREG(r_dest)) {
+      opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
+    } else {
+      DCHECK(ARM_SINGLEREG(r_src));
+      opcode = kThumb2Fmrs;
+    }
+  }
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset)
+{
+  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
+  return NULL;
+}
+
+LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp)
+{
+  LOG(FATAL) << "Unexpected use of OpMem for Arm";
+  return NULL;
+}
+
+LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg)
+{
+  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
+  return NULL;
+}
+
+LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset)
+{
+  LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
+  return NULL;
+}
+
+LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg)
+{
+  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
+  return NULL;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
new file mode 100644
index 0000000..5c10c4c
--- /dev/null
+++ b/compiler/dex/quick/codegen_util.cc
@@ -0,0 +1,1109 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_internals.h"
+#include "dex_file-inl.h"
+#include "gc_map.h"
+#include "mir_to_lir-inl.h"
+#include "verifier/dex_gc_map.h"
+#include "verifier/method_verifier.h"
+
+namespace art {
+
+bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src)
+{
+  bool res = false;
+  if (rl_src.is_const) {
+    if (rl_src.wide) {
+      if (rl_src.fp) {
+         res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
+      } else {
+         res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
+      }
+    } else {
+      if (rl_src.fp) {
+         res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
+      } else {
+         res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
+      }
+    }
+  }
+  return res;
+}
+
+void Mir2Lir::MarkSafepointPC(LIR* inst)
+{
+  inst->def_mask = ENCODE_ALL;
+  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
+  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
+}
+
+bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put)
+{
+  return cu_->compiler_driver->ComputeInstanceFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
+}
+
+/* Convert an instruction to a NOP */
+void Mir2Lir::NopLIR(LIR* lir)
+{
+  lir->flags.is_nop = true;
+}
+
+void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type)
+{
+  uint64_t *mask_ptr;
+  uint64_t mask = ENCODE_MEM;
+  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
+  if (is_load) {
+    mask_ptr = &lir->use_mask;
+  } else {
+    mask_ptr = &lir->def_mask;
+  }
+  /* Clear out the memref flags */
+  *mask_ptr &= ~mask;
+  /* ...and then add back the one we need */
+  switch (mem_type) {
+    case kLiteral:
+      DCHECK(is_load);
+      *mask_ptr |= ENCODE_LITERAL;
+      break;
+    case kDalvikReg:
+      *mask_ptr |= ENCODE_DALVIK_REG;
+      break;
+    case kHeapRef:
+      *mask_ptr |= ENCODE_HEAP_REF;
+      break;
+    case kMustNotAlias:
+      /* Currently only loads can be marked as kMustNotAlias */
+      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
+      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
+      break;
+    default:
+      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
+  }
+}
+
+/*
+ * Mark load/store instructions that access Dalvik registers through the stack.
+ */
+void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
+                                      bool is64bit)
+{
+  SetMemRefType(lir, is_load, kDalvikReg);
+
+  /*
+   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
+   * access.
+   */
+  lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
+}
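+
+// Worked example (assuming ENCODE_ALIAS_INFO marks 64-bit accesses in the
+// most-significant bit, per the comment above): a wide load of Dalvik
+// register 5 would record alias_info == 0x80000005, while a 32-bit access of
+// the same register records plain 5.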
+
+/*
+ * Debugging macros
+ */
+#define DUMP_RESOURCE_MASK(X)
+
+/* Pretty-print a LIR instruction */
+void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr)
+{
+  int offset = lir->offset;
+  int dest = lir->operands[0];
+  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));
+
+  /* Handle pseudo-ops individually, and all regular insns as a group */
+  switch (lir->opcode) {
+    case kPseudoMethodEntry:
+      LOG(INFO) << "-------- method entry "
+                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+      break;
+    case kPseudoMethodExit:
+      LOG(INFO) << "-------- Method_Exit";
+      break;
+    case kPseudoBarrier:
+      LOG(INFO) << "-------- BARRIER";
+      break;
+    case kPseudoEntryBlock:
+      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
+      break;
+    case kPseudoDalvikByteCodeBoundary:
+      if (lir->operands[0] == 0) {
+         lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
+      }
+      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
+                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
+      break;
+    case kPseudoExitBlock:
+      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
+      break;
+    case kPseudoPseudoAlign4:
+      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
+                << offset << "): .align4";
+      break;
+    case kPseudoEHBlockLabel:
+      LOG(INFO) << "Exception_Handling:";
+      break;
+    case kPseudoTargetLabel:
+    case kPseudoNormalBlockLabel:
+      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
+      break;
+    case kPseudoThrowTarget:
+      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
+      break;
+    case kPseudoIntrinsicRetry:
+      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
+      break;
+    case kPseudoSuspendTarget:
+      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
+      break;
+    case kPseudoSafepointPC:
+      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
+      break;
+    case kPseudoExportedPC:
+      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
+      break;
+    case kPseudoCaseLabel:
+      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
+                << std::hex << lir->operands[0] << "|" << std::dec <<
+        lir->operands[0];
+      break;
+    default:
+      if (lir->flags.is_nop && !dump_nop) {
+        break;
+      } else {
+        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
+                                               lir, base_addr));
+        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
+                                                    lir, base_addr));
+        LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
+                                  reinterpret_cast<unsigned int>(base_addr + offset),
+                                  op_name.c_str(), op_operands.c_str(),
+                                  lir->flags.is_nop ? "(nop)" : "");
+      }
+      break;
+  }
+
+  if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
+    DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->use_mask, "use"));
+  }
+  if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
+    DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->def_mask, "def"));
+  }
+}
+
+void Mir2Lir::DumpPromotionMap()
+{
+  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
+  for (int i = 0; i < num_regs; i++) {
+    PromotionMap v_reg_map = promotion_map_[i];
+    std::string buf;
+    if (v_reg_map.fp_location == kLocPhysReg) {
+      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
+    }
+
+    std::string buf3;
+    if (i < cu_->num_dalvik_registers) {
+      StringAppendF(&buf3, "%02d", i);
+    } else if (i == mir_graph_->GetMethodSReg()) {
+      buf3 = "Method*";
+    } else {
+      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
+    }
+
+    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
+                              v_reg_map.core_location == kLocPhysReg ?
+                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
+                              v_reg_map.core_reg : SRegOffset(i),
+                              buf.c_str());
+  }
+}
+
+/* Dump a mapping table */
+void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor,
+                               const std::string& name, const std::string& signature,
+                               const std::vector<uint32_t>& v) {
+  if (v.size() > 0) {
+    std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
+                     descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
+    std::replace(line.begin(), line.end(), ';', '_');
+    LOG(INFO) << line;
+    for (uint32_t i = 0; i < v.size(); i+=2) {
+      line = StringPrintf("    {0x%05x, 0x%04x},", v[i], v[i+1]);
+      LOG(INFO) << line;
+    }
+    LOG(INFO) <<"  };\n\n";
+  }
+}
+
+/* Dump instructions and constant pool contents */
+void Mir2Lir::CodegenDump()
+{
+  LOG(INFO) << "Dumping LIR insns for "
+            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  LIR* lir_insn;
+  int insns_size = cu_->code_item->insns_size_in_code_units_;
+
+  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
+  LOG(INFO) << "Ins          : " << cu_->num_ins;
+  LOG(INFO) << "Outs         : " << cu_->num_outs;
+  LOG(INFO) << "CoreSpills       : " << num_core_spills_;
+  LOG(INFO) << "FPSpills       : " << num_fp_spills_;
+  LOG(INFO) << "CompilerTemps    : " << cu_->num_compiler_temps;
+  LOG(INFO) << "Frame size       : " << frame_size_;
+  LOG(INFO) << "code size is " << total_size_ <<
+    " bytes, Dalvik size is " << insns_size * 2;
+  LOG(INFO) << "expansion factor: "
+            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
+  DumpPromotionMap();
+  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+    DumpLIRInsn(lir_insn, 0);
+  }
+  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
+                              lir_insn->operands[0]);
+  }
+
+  const DexFile::MethodId& method_id =
+      cu_->dex_file->GetMethodId(cu_->method_idx);
+  std::string signature(cu_->dex_file->GetMethodSignature(method_id));
+  std::string name(cu_->dex_file->GetMethodName(method_id));
+  std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+
+  // Dump mapping tables
+  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
+  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
+}
+
+/*
+ * Search the existing constants in the literal pool for an exact or close match
+ * within a specified delta (greater than or equal to 0).
+ */
+LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
+{
+  while (data_target) {
+    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
+      return data_target;
+    data_target = data_target->next;
+  }
+  return NULL;
+}
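+
+// Usage note: the unsigned subtraction above means only values at or above an
+// existing entry can match.  With delta == 3, a pool entry of 0x1000 satisfies
+// requests for 0x1000 through 0x1003, but a request for 0x0FFF wraps around
+// and is treated as out of range.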
+
+/* Search the existing constants in the literal pool for an exact wide match */
+LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
+{
+  bool lo_match = false;
+  LIR* lo_target = NULL;
+  while (data_target) {
+    if (lo_match && (data_target->operands[0] == val_hi)) {
+      // Record high word in case we need to expand this later.
+      lo_target->operands[1] = val_hi;
+      return lo_target;
+    }
+    lo_match = false;
+    if (data_target->operands[0] == val_lo) {
+      lo_match = true;
+      lo_target = data_target;
+    }
+    data_target = data_target->next;
+  }
+  return NULL;
+}
+
+/*
+ * The following are building blocks to insert constants into the pool or
+ * instruction streams.
+ */
+
+/* Add a 32-bit constant to the constant pool */
+LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value)
+{
+  /* Add the constant to the literal pool */
+  if (constant_list_p) {
+    LIR* new_value = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData));
+    new_value->operands[0] = value;
+    new_value->next = *constant_list_p;
+    *constant_list_p = new_value;
+    return new_value;
+  }
+  return NULL;
+}
+
+/* Add a 64-bit constant to the constant pool or mixed with code */
+LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi)
+{
+  AddWordData(constant_list_p, val_hi);
+  return AddWordData(constant_list_p, val_lo);
+}
+
+static void PushWord(std::vector<uint8_t>&buf, int data) {
+  buf.push_back(data & 0xff);
+  buf.push_back((data >> 8) & 0xff);
+  buf.push_back((data >> 16) & 0xff);
+  buf.push_back((data >> 24) & 0xff);
+}
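+
+// For illustration, PushWord(buf, 0x12345678) appends the bytes
+// 0x78, 0x56, 0x34, 0x12, so the literal pools and tables below are emitted
+// little-endian regardless of host byte order.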
+
+static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
+  while (buf.size() < offset) {
+    buf.push_back(0);
+  }
+}
+
+/* Write the literal pool to the output stream */
+void Mir2Lir::InstallLiteralPools()
+{
+  AlignBuffer(code_buffer_, data_offset_);
+  LIR* data_lir = literal_list_;
+  while (data_lir != NULL) {
+    PushWord(code_buffer_, data_lir->operands[0]);
+    data_lir = NEXT_LIR(data_lir);
+  }
+  // Push code and method literals, record offsets for the compiler to patch.
+  data_lir = code_literal_list_;
+  while (data_lir != NULL) {
+    uint32_t target = data_lir->operands[0];
+    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
+                                      cu_->method_idx,
+                                      cu_->invoke_type,
+                                      target,
+                                      static_cast<InvokeType>(data_lir->operands[1]),
+                                      code_buffer_.size());
+    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
+    // unique based on target to ensure code deduplication works
+    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+    PushWord(code_buffer_, unique_patch_value);
+    data_lir = NEXT_LIR(data_lir);
+  }
+  data_lir = method_literal_list_;
+  while (data_lir != NULL) {
+    uint32_t target = data_lir->operands[0];
+    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
+                                        cu_->method_idx,
+                                        cu_->invoke_type,
+                                        target,
+                                        static_cast<InvokeType>(data_lir->operands[1]),
+                                        code_buffer_.size());
+    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
+    // unique based on target to ensure code deduplication works
+    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+    PushWord(code_buffer_, unique_patch_value);
+    data_lir = NEXT_LIR(data_lir);
+  }
+}
+
+/* Write the switch tables to the output stream */
+void Mir2Lir::InstallSwitchTables()
+{
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
+  while (true) {
+    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    AlignBuffer(code_buffer_, tab_rec->offset);
+    /*
+     * For Arm, our reference point is the address of the bx
+     * instruction that does the launch, so we have to subtract
+     * the auto pc-advance.  For other targets the reference point
+     * is a label, so we can use the offset as-is.
+     */
+    int bx_offset = INVALID_OFFSET;
+    switch (cu_->instruction_set) {
+      case kThumb2:
+        bx_offset = tab_rec->anchor->offset + 4;
+        break;
+      case kX86:
+        bx_offset = 0;
+        break;
+      case kMips:
+        bx_offset = tab_rec->anchor->offset;
+        break;
+      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
+    }
+    if (cu_->verbose) {
+      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
+    }
+    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+        int disp = tab_rec->targets[elems]->offset - bx_offset;
+        if (cu_->verbose) {
+          LOG(INFO) << "  Case[" << elems << "] key: 0x"
+                    << std::hex << keys[elems] << ", disp: 0x"
+                    << std::hex << disp;
+        }
+        PushWord(code_buffer_, keys[elems]);
+        PushWord(code_buffer_,
+          tab_rec->targets[elems]->offset - bx_offset);
+      }
+    } else {
+      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
+                static_cast<int>(Instruction::kPackedSwitchSignature));
+      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+        int disp = tab_rec->targets[elems]->offset - bx_offset;
+        if (cu_->verbose) {
+          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
+                    << std::hex << disp;
+        }
+        PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
+      }
+    }
+  }
+}
+
+/* Write the fill array data to the output stream */
+void Mir2Lir::InstallFillArrayData()
+{
+  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
+  while (true) {
+    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    AlignBuffer(code_buffer_, tab_rec->offset);
+    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
+      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
+      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
+    }
+  }
+}
+
+static int AssignLiteralOffsetCommon(LIR* lir, int offset)
+{
+  for (;lir != NULL; lir = lir->next) {
+    lir->offset = offset;
+    offset += 4;
+  }
+  return offset;
+}
+
+// Make sure we have a code address for every declared catch entry
+bool Mir2Lir::VerifyCatchEntries()
+{
+  bool success = true;
+  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
+       it != mir_graph_->catches_.end(); ++it) {
+    uint32_t dex_pc = *it;
+    bool found = false;
+    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
+      if (dex_pc == dex2pc_mapping_table_[i+1]) {
+        found = true;
+        break;
+      }
+    }
+    if (!found) {
+      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
+      success = false;
+    }
+  }
+  // Now, try in the other direction
+  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
+    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
+    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
+      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
+      success = false;
+    }
+  }
+  if (!success) {
+    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
+              << dex2pc_mapping_table_.size()/2;
+  }
+  return success;
+}
+
+
+void Mir2Lir::CreateMappingTables()
+{
+  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
+      pc2dex_mapping_table_.push_back(tgt_lir->offset);
+      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
+    }
+    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
+      dex2pc_mapping_table_.push_back(tgt_lir->offset);
+      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
+    }
+  }
+  if (kIsDebugBuild) {
+    DCHECK(VerifyCatchEntries());
+  }
+  combined_mapping_table_.push_back(pc2dex_mapping_table_.size() +
+                                        dex2pc_mapping_table_.size());
+  combined_mapping_table_.push_back(pc2dex_mapping_table_.size());
+  combined_mapping_table_.insert(combined_mapping_table_.end(), pc2dex_mapping_table_.begin(),
+                                 pc2dex_mapping_table_.end());
+  combined_mapping_table_.insert(combined_mapping_table_.end(), dex2pc_mapping_table_.begin(),
+                                 dex2pc_mapping_table_.end());
+}
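+
+// Resulting layout, for illustration: with two pc2dex pairs and one dex2pc
+// pair the combined table is
+//   { 6, 4, pc_0, dex_0, pc_1, dex_1, pc_2, dex_2 }
+// where [0] is the combined number of table words, [1] is how many of those
+// belong to the pc2dex table, and the dex2pc pairs follow the pc2dex pairs.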
+
+class NativePcToReferenceMapBuilder {
+ public:
+  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
+                                size_t entries, uint32_t max_native_offset,
+                                size_t references_width) : entries_(entries),
+                                references_width_(references_width), in_use_(entries),
+                                table_(table) {
+    // Compute width in bytes needed to hold max_native_offset.
+    native_offset_width_ = 0;
+    while (max_native_offset != 0) {
+      native_offset_width_++;
+      max_native_offset >>= 8;
+    }
+    // Resize table and set up header.
+    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
+    CHECK_LT(native_offset_width_, 1U << 3);
+    (*table)[0] = native_offset_width_ & 7;
+    CHECK_LT(references_width_, 1U << 13);
+    (*table)[0] |= (references_width_ << 3) & 0xFF;
+    (*table)[1] = (references_width_ >> 5) & 0xFF;
+    CHECK_LT(entries, 1U << 16);
+    (*table)[2] = entries & 0xFF;
+    (*table)[3] = (entries >> 8) & 0xFF;
+  }
+
+  void AddEntry(uint32_t native_offset, const uint8_t* references) {
+    size_t table_index = TableIndex(native_offset);
+    while (in_use_[table_index]) {
+      table_index = (table_index + 1) % entries_;
+    }
+    in_use_[table_index] = true;
+    SetNativeOffset(table_index, native_offset);
+    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
+    SetReferences(table_index, references);
+  }
+
+ private:
+  size_t TableIndex(uint32_t native_offset) {
+    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
+  }
+
+  uint32_t GetNativeOffset(size_t table_index) {
+    uint32_t native_offset = 0;
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    for (size_t i = 0; i < native_offset_width_; i++) {
+      native_offset |= (*table_)[table_offset + i] << (i * 8);
+    }
+    return native_offset;
+  }
+
+  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    for (size_t i = 0; i < native_offset_width_; i++) {
+      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
+    }
+  }
+
+  void SetReferences(size_t table_index, const uint8_t* references) {
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
+  }
+
+  size_t EntryWidth() const {
+    return native_offset_width_ + references_width_;
+  }
+
+  // Number of entries in the table.
+  const size_t entries_;
+  // Number of bytes used to encode the reference bitmap.
+  const size_t references_width_;
+  // Number of bytes used to encode a native offset.
+  size_t native_offset_width_;
+  // Entries that are in use.
+  std::vector<bool> in_use_;
+  // The table we're building.
+  std::vector<uint8_t>* const table_;
+};
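+
+// Worked header example: with native_offset_width_ == 2, references_width_ == 1
+// and 10 entries, the constructor writes the 4-byte header
+//   table[0] = 2 | (1 << 3) = 0x0a, table[1] = 0x00,
+//   table[2] = 0x0a (10 entries), table[3] = 0x00
+// and each hashed entry then occupies EntryWidth() == 3 bytes: a 2-byte native
+// offset followed by a 1-byte reference bitmap.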
+
+void Mir2Lir::CreateNativeGcMap() {
+  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
+  uint32_t max_native_offset = 0;
+  for (size_t i = 0; i < mapping_table.size(); i += 2) {
+    uint32_t native_offset = mapping_table[i + 0];
+    if (native_offset > max_native_offset) {
+      max_native_offset = native_offset;
+    }
+  }
+  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
+  const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
+  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
+  // Compute native offset to references size.
+  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
+                                                      mapping_table.size() / 2, max_native_offset,
+                                                      dex_gc_map.RegWidth());
+
+  for (size_t i = 0; i < mapping_table.size(); i += 2) {
+    uint32_t native_offset = mapping_table[i + 0];
+    uint32_t dex_pc = mapping_table[i + 1];
+    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
+    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
+    native_gc_map_builder.AddEntry(native_offset, references);
+  }
+}
+
+/* Determine the offset of each literal field */
+int Mir2Lir::AssignLiteralOffset(int offset)
+{
+  offset = AssignLiteralOffsetCommon(literal_list_, offset);
+  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
+  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
+  return offset;
+}
+
+int Mir2Lir::AssignSwitchTablesOffset(int offset)
+{
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
+  while (true) {
+    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    tab_rec->offset = offset;
+    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+      offset += tab_rec->table[1] * (sizeof(int) * 2);
+    } else {
+      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
+                static_cast<int>(Instruction::kPackedSwitchSignature));
+      offset += tab_rec->table[1] * sizeof(int);
+    }
+  }
+  return offset;
+}
+
+int Mir2Lir::AssignFillArrayDataOffset(int offset)
+{
+  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
+  while (true) {
+    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    tab_rec->offset = offset;
+    offset += tab_rec->size;
+    // word align
+    offset = (offset + 3) & ~3;
+  }
+  return offset;
+}
+
+// LIR offset assignment.
+int Mir2Lir::AssignInsnOffsets()
+{
+  LIR* lir;
+  int offset = 0;
+
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+    lir->offset = offset;
+    if (lir->opcode >= 0) {
+      if (!lir->flags.is_nop) {
+        offset += lir->flags.size;
+      }
+    } else if (lir->opcode == kPseudoPseudoAlign4) {
+      if (offset & 0x2) {
+        offset += 2;
+        lir->operands[0] = 1;
+      } else {
+        lir->operands[0] = 0;
+      }
+    }
+    /* Pseudo opcodes don't consume space */
+  }
+
+  return offset;
+}
+
+/*
+ * Walk the compilation unit and assign offsets to instructions
+ * and literals and compute the total size of the compiled unit.
+ */
+void Mir2Lir::AssignOffsets()
+{
+  int offset = AssignInsnOffsets();
+
+  /* Const values have to be word aligned */
+  offset = (offset + 3) & ~3;
+
+  /* Set up offsets for literals */
+  data_offset_ = offset;
+
+  offset = AssignLiteralOffset(offset);
+
+  offset = AssignSwitchTablesOffset(offset);
+
+  offset = AssignFillArrayDataOffset(offset);
+
+  total_size_ = offset;
+}
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If an out-of-range branch distance
+ * is seen, rearrange the instructions a bit to correct it.
+ */
+void Mir2Lir::AssembleLIR()
+{
+  AssignOffsets();
+  int assembler_retries = 0;
+  /*
+   * Assemble here.  Note that we generate code with optimistic assumptions
+   * and if found not to work, we'll have to redo the sequence and retry.
+   */
+
+  while (true) {
+    AssemblerStatus res = AssembleInstructions(0);
+    if (res == kSuccess) {
+      break;
+    } else {
+      assembler_retries++;
+      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
+        CodegenDump();
+        LOG(FATAL) << "Assembler error - too many retries";
+      }
+      // Redo offsets and try again
+      AssignOffsets();
+      code_buffer_.clear();
+    }
+  }
+
+  // Install literals
+  InstallLiteralPools();
+
+  // Install switch tables
+  InstallSwitchTables();
+
+  // Install fill array data
+  InstallFillArrayData();
+
+  // Create the mapping table and native offset to reference map.
+  CreateMappingTables();
+
+  CreateNativeGcMap();
+}
+
+/*
+ * Insert a kPseudoCaseLabel at the beginning of the Dalvik
+ * offset vaddr.  This label will be used to fix up the case
+ * branch table during the assembly phase.  Be sure to set
+ * all resource flags on this to prevent code motion across
+ * target boundaries.  KeyVal is just there for debugging.
+ */
+LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal)
+{
+  SafeMap<unsigned int, LIR*>::iterator it;
+  it = boundary_map_.find(vaddr);
+  if (it == boundary_map_.end()) {
+    LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
+  }
+  LIR* new_label = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+  new_label->dalvik_offset = vaddr;
+  new_label->opcode = kPseudoCaseLabel;
+  new_label->operands[0] = keyVal;
+  InsertLIRAfter(it->second, new_label);
+  return new_label;
+}
+
+void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec)
+{
+  const uint16_t* table = tab_rec->table;
+  int base_vaddr = tab_rec->vaddr;
+  const int *targets = reinterpret_cast<const int*>(&table[4]);
+  int entries = table[1];
+  int low_key = s4FromSwitchData(&table[2]);
+  for (int i = 0; i < entries; i++) {
+    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
+  }
+}
+
+void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec)
+{
+  const uint16_t* table = tab_rec->table;
+  int base_vaddr = tab_rec->vaddr;
+  int entries = table[1];
+  const int* keys = reinterpret_cast<const int*>(&table[2]);
+  const int* targets = &keys[entries];
+  for (int i = 0; i < entries; i++) {
+    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
+  }
+}
+
+void Mir2Lir::ProcessSwitchTables()
+{
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
+  while (true) {
+    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
+      MarkPackedCaseLabels(tab_rec);
+    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+      MarkSparseCaseLabels(tab_rec);
+    } else {
+      LOG(FATAL) << "Invalid switch table";
+    }
+  }
+}
+
+void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table)
+  /*
+   * Sparse switch data format:
+   *  ushort ident = 0x0200   magic value
+   *  ushort size       number of entries in the table; > 0
+   *  int keys[size]      keys, sorted low-to-high; 32-bit aligned
+   *  int targets[size]     branch targets, relative to switch opcode
+   *
+   * Total size is (2+size*4) 16-bit code units.
+   */
+{
+  uint16_t ident = table[0];
+  int entries = table[1];
+  const int* keys = reinterpret_cast<const int*>(&table[2]);
+  const int* targets = &keys[entries];
+  LOG(INFO) <<  "Sparse switch table - ident:0x" << std::hex << ident
+            << ", entries: " << std::dec << entries;
+  for (int i = 0; i < entries; i++) {
+    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
+  }
+}
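+
+// Example payload in the format documented above: a sparse switch with cases
+// 3 and 101 would be laid out as
+//   ident=0x0200, size=0x0002, keys={3, 101}, targets={+0x0a, +0x24}
+// for a total of 2 + 2*4 = 10 code units (the target values here are
+// illustrative branch offsets relative to the switch opcode).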
+
+void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table)
+  /*
+   * Packed switch data format:
+   *  ushort ident = 0x0100   magic value
+   *  ushort size       number of entries in the table
+   *  int first_key       first (and lowest) switch case value
+   *  int targets[size]     branch targets, relative to switch opcode
+   *
+   * Total size is (4+size*2) 16-bit code units.
+   */
+{
+  uint16_t ident = table[0];
+  const int* targets = reinterpret_cast<const int*>(&table[4]);
+  int entries = table[1];
+  int low_key = s4FromSwitchData(&table[2]);
+  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
+            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
+  for (int i = 0; i < entries; i++) {
+    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
+              << targets[i];
+  }
+}
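+
+// Example payload in the format documented above: a packed switch covering
+// cases -1, 0 and 1 would be laid out as
+//   ident=0x0100, size=0x0003, first_key=-1, targets={+0x0a, +0x10, +0x16}
+// for a total of 4 + 3*2 = 10 code units, with targets[i] belonging to case
+// (first_key + i).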
+
+/*
+ * Set up special LIR to mark a Dalvik byte-code instruction start and
+ * record it in the boundary_map.  NOTE: in cases such as kMirOpCheck in
+ * which we split a single Dalvik instruction, only the first MIR op
+ * associated with a Dalvik PC should be entered into the map.
+ */
+LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str)
+{
+  LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+  if (boundary_map_.find(offset) == boundary_map_.end()) {
+    boundary_map_.Put(offset, res);
+  }
+  return res;
+}
+
+bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
+{
+  bool is_taken;
+  switch (opcode) {
+    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
+    case Instruction::IF_NE: is_taken = (src1 != src2); break;
+    case Instruction::IF_LT: is_taken = (src1 < src2); break;
+    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
+    case Instruction::IF_GT: is_taken = (src1 > src2); break;
+    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
+    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
+    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
+    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
+    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
+    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
+    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
+    default:
+      LOG(FATAL) << "Unexpected opcode " << opcode;
+      is_taken = false;
+  }
+  return is_taken;
+}
+
+// Convert relation of src1/src2 to src2/src1
+ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
+  ConditionCode res;
+  switch (before) {
+    case kCondEq: res = kCondEq; break;
+    case kCondNe: res = kCondNe; break;
+    case kCondLt: res = kCondGt; break;
+    case kCondGt: res = kCondLt; break;
+    case kCondLe: res = kCondGe; break;
+    case kCondGe: res = kCondLe; break;
+    default:
+      res = static_cast<ConditionCode>(0);
+      LOG(FATAL) << "Unexpected ccode " << before;
+  }
+  return res;
+}
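+
+// Example: GenCompareAndBranch() uses this when it swaps operands so the
+// constant ends up in src2.  "if (7 < x)" starts as kCondLt with src1 == 7;
+// after the swap the comparison is evaluated as x vs. 7 with kCondGt, which
+// OpCmpImmBranch can then emit directly.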
+
+// TODO: move to mir_to_lir.cc
+Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Backend(arena),
+      literal_list_(NULL),
+      method_literal_list_(NULL),
+      code_literal_list_(NULL),
+      cu_(cu),
+      mir_graph_(mir_graph),
+      switch_tables_(arena, 4, kGrowableArraySwitchTables),
+      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
+      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
+      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
+      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
+      data_offset_(0),
+      total_size_(0),
+      block_label_list_(NULL),
+      current_dalvik_offset_(0),
+      reg_pool_(NULL),
+      live_sreg_(0),
+      num_core_spills_(0),
+      num_fp_spills_(0),
+      frame_size_(0),
+      core_spill_mask_(0),
+      fp_spill_mask_(0),
+      first_lir_insn_(NULL),
+      last_lir_insn_(NULL)
+ {
+  promotion_map_ = static_cast<PromotionMap*>
+      (arena_->NewMem((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
+                      sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc));
+}
+
+void Mir2Lir::Materialize() {
+  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
+
+  /* Allocate Registers using simple local allocation scheme */
+  SimpleRegAlloc();
+
+  // FIXME: re-enable by retrieving from mir_graph
+  SpecialCaseHandler special_case = kNoHandler;
+
+  if (special_case != kNoHandler) {
+    /*
+     * Custom codegen for special cases.  If for any reason the
+     * special codegen doesn't succeed, first_lir_insn_ will be
+     * set to NULL.
+     */
+    SpecialMIR2LIR(special_case);
+  }
+
+  /* Convert MIR to LIR, etc. */
+  if (first_lir_insn_ == NULL) {
+    MethodMIR2LIR();
+  }
+
+  /* Method is not empty */
+  if (first_lir_insn_) {
+    // Mark the targets of switch statement case labels.
+    ProcessSwitchTables();
+
+    /* Convert LIR into machine code. */
+    AssembleLIR();
+
+    if (cu_->verbose) {
+      CodegenDump();
+    }
+  }
+
+}
+
+CompiledMethod* Mir2Lir::GetCompiledMethod() {
+  // Combine vmap tables - core regs, then fp regs - into vmap_table
+  std::vector<uint16_t> vmap_table;
+  // Core regs may have been inserted out of order - sort first
+  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
+  for (size_t i = 0 ; i < core_vmap_table_.size(); i++) {
+    // Copy, stripping out the phys register sort key
+    vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
+  }
+  // If we have a frame, push a marker to take place of lr
+  if (frame_size_ > 0) {
+    vmap_table.push_back(INVALID_VREG);
+  } else {
+    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
+    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
+  }
+  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
+  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
+    vmap_table.push_back(fp_vmap_table_[i]);
+  }
+  CompiledMethod* result =
+      new CompiledMethod(cu_->instruction_set, code_buffer_,
+                         frame_size_, core_spill_mask_, fp_spill_mask_,
+                         combined_mapping_table_, vmap_table, native_gc_map_);
+  return result;
+}
+
+int Mir2Lir::ComputeFrameSize() {
+  /* Figure out the frame size */
+  static const uint32_t kAlignMask = kStackAlignment - 1;
+  uint32_t size = (num_core_spills_ + num_fp_spills_ +
+                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
+                   cu_->num_compiler_temps + 1 /* cur_method* */)
+                   * sizeof(uint32_t);
+  /* Align and set */
+  return (size + kAlignMask) & ~(kAlignMask);
+}
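+
+// Worked example (assuming kStackAlignment == 16): with 5 core spills, no FP
+// spills, 6 Dalvik regs, 2 outs and no compiler temps, the raw size is
+// (5 + 0 + 1 + 6 + 2 + 0 + 1) * 4 = 60 bytes, which rounds up to a 64-byte
+// frame.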
+
+/*
+ * Append an LIR instruction to the LIR list maintained by a compilation
+ * unit
+ */
+void Mir2Lir::AppendLIR(LIR* lir)
+{
+  if (first_lir_insn_ == NULL) {
+    DCHECK(last_lir_insn_ == NULL);
+    last_lir_insn_ = first_lir_insn_ = lir;
+    lir->prev = lir->next = NULL;
+  } else {
+    last_lir_insn_->next = lir;
+    lir->prev = last_lir_insn_;
+    lir->next = NULL;
+    last_lir_insn_ = lir;
+  }
+}
+
+/*
+ * Insert an LIR instruction before the current instruction, which cannot be the
+ * first instruction.
+ *
+ * prev_lir <-> new_lir <-> current_lir
+ */
+void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir)
+{
+  DCHECK(current_lir->prev != NULL);
+  LIR *prev_lir = current_lir->prev;
+
+  prev_lir->next = new_lir;
+  new_lir->prev = prev_lir;
+  new_lir->next = current_lir;
+  current_lir->prev = new_lir;
+}
+
+/*
+ * Insert an LIR instruction after the current instruction, which cannot be the
+ * last instruction.
+ *
+ * current_lir -> new_lir -> old_next
+ */
+void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir)
+{
+  new_lir->prev = current_lir;
+  new_lir->next = current_lir->next;
+  current_lir->next = new_lir;
+  new_lir->next->prev = new_lir;
+}
+
+
+}  // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
new file mode 100644
index 0000000..865b9c5
--- /dev/null
+++ b/compiler/dex/quick/gen_common.cc
@@ -0,0 +1,1800 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_ir.h"
+#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mirror/array.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "verifier/method_verifier.h"
+
+namespace art {
+
+/*
+ * This source file contains "gen" codegen routines that should
+ * be applicable to most targets.  Only mid-level support utilities
+ * and "op" calls may be used here.
+ */
+
+/*
+ * Generate a kPseudoBarrier marker to indicate the boundary of special
+ * blocks.
+ */
+void Mir2Lir::GenBarrier()
+{
+  LIR* barrier = NewLIR0(kPseudoBarrier);
+  /* Mark all resources as being clobbered */
+  barrier->def_mask = -1;
+}
+
+// FIXME: need to do some work to split out targets with
+// condition codes and those without
+LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind)
+{
+  DCHECK_NE(cu_->instruction_set, kMips);
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
+  LIR* branch = OpCondBranch(c_code, tgt);
+  // Remember branch target - will process later
+  throw_launchpads_.Insert(tgt);
+  return branch;
+}
+
+LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind)
+{
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
+  LIR* branch;
+  if (c_code == kCondAl) {
+    branch = OpUnconditionalBranch(tgt);
+  } else {
+    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
+  }
+  // Remember branch target - will process later
+  throw_launchpads_.Insert(tgt);
+  return branch;
+}
+
+/* Perform null-check on a register.  */
+LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags)
+{
+  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
+    opt_flags & MIR_IGNORE_NULL_CHECK) {
+    return NULL;
+  }
+  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
+}
+
+/* Perform check on two registers */
+LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
+                             ThrowKind kind)
+{
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
+  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
+  // Remember branch target - will process later
+  throw_launchpads_.Insert(tgt);
+  return branch;
+}
+
+void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
+                                  RegLocation rl_src2, LIR* taken,
+                                  LIR* fall_through)
+{
+  ConditionCode cond;
+  switch (opcode) {
+    case Instruction::IF_EQ:
+      cond = kCondEq;
+      break;
+    case Instruction::IF_NE:
+      cond = kCondNe;
+      break;
+    case Instruction::IF_LT:
+      cond = kCondLt;
+      break;
+    case Instruction::IF_GE:
+      cond = kCondGe;
+      break;
+    case Instruction::IF_GT:
+      cond = kCondGt;
+      break;
+    case Instruction::IF_LE:
+      cond = kCondLe;
+      break;
+    default:
+      cond = static_cast<ConditionCode>(0);
+      LOG(FATAL) << "Unexpected opcode " << opcode;
+  }
+
+  // Normalize such that if either operand is constant, src2 will be constant
+  if (rl_src1.is_const) {
+    RegLocation rl_temp = rl_src1;
+    rl_src1 = rl_src2;
+    rl_src2 = rl_temp;
+    cond = FlipComparisonOrder(cond);
+  }
+
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
+  // Is this really an immediate comparison?
+  if (rl_src2.is_const) {
+    // If it's already live in a register or not easily materialized, just keep going
+    RegLocation rl_temp = UpdateLoc(rl_src2);
+    if ((rl_temp.location == kLocDalvikFrame) &&
+        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
+      // OK - convert this to a compare immediate and branch
+      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
+      OpUnconditionalBranch(fall_through);
+      return;
+    }
+  }
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
+  OpUnconditionalBranch(fall_through);
+}
+
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
+                                      LIR* fall_through)
+{
+  ConditionCode cond;
+  rl_src = LoadValue(rl_src, kCoreReg);
+  switch (opcode) {
+    case Instruction::IF_EQZ:
+      cond = kCondEq;
+      break;
+    case Instruction::IF_NEZ:
+      cond = kCondNe;
+      break;
+    case Instruction::IF_LTZ:
+      cond = kCondLt;
+      break;
+    case Instruction::IF_GEZ:
+      cond = kCondGe;
+      break;
+    case Instruction::IF_GTZ:
+      cond = kCondGt;
+      break;
+    case Instruction::IF_LEZ:
+      cond = kCondLe;
+      break;
+    default:
+      cond = static_cast<ConditionCode>(0);
+      LOG(FATAL) << "Unexpected opcode " << opcode;
+  }
+  OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
+  OpUnconditionalBranch(fall_through);
+}
+
+void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (rl_src.location == kLocPhysReg) {
+    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  } else {
+    LoadValueDirect(rl_src, rl_result.low_reg);
+  }
+  OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
+                              RegLocation rl_src)
+{
+   rl_src = LoadValue(rl_src, kCoreReg);
+   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+   OpKind op = kOpInvalid;
+   switch (opcode) {
+     case Instruction::INT_TO_BYTE:
+       op = kOp2Byte;
+       break;
+     case Instruction::INT_TO_SHORT:
+        op = kOp2Short;
+        break;
+     case Instruction::INT_TO_CHAR:
+        op = kOp2Char;
+        break;
+     default:
+       LOG(ERROR) << "Bad int conversion type";
+   }
+   OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
+   StoreValue(rl_dest, rl_result);
+}
+
+/*
+ * Let helper function take care of everything.  Will call
+ * Array::AllocFromCode(type_idx, method, count);
+ * Note: AllocFromCode will handle checks for errNegativeArraySize.
+ */
+void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
+                          RegLocation rl_src)
+{
+  FlushAllRegs();  /* Everything to home location */
+  int func_offset;
+  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                       type_idx)) {
+    func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+  } else {
+    func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+  }
+  CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
+  RegLocation rl_result = GetReturn(false);
+  StoreValue(rl_dest, rl_result);
+}
+
+/*
+ * Similar to GenNewArray, but with post-allocation initialization.
+ * Verifier guarantees we're dealing with an array class.  Current
+ * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
+ * Current code also throws internal unimp if not 'L', '[' or 'I'.
+ */
+void Mir2Lir::GenFilledNewArray(CallInfo* info)
+{
+  int elems = info->num_arg_words;
+  int type_idx = info->index;
+  FlushAllRegs();  /* Everything to home location */
+  int func_offset;
+  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                       type_idx)) {
+    func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+  } else {
+    func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+  }
+  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
+  FreeTemp(TargetReg(kArg2));
+  FreeTemp(TargetReg(kArg1));
+  /*
+   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
+   * return region.  Because AllocFromCode placed the new array
+   * in kRet0, we'll just lock it into place.  When debugger support is
+   * added, it may be necessary to additionally copy all return
+   * values to a home location in thread-local storage
+   */
+  LockTemp(TargetReg(kRet0));
+
+  // TODO: use the correct component size, currently all supported types
+  // share array alignment with ints (see comment at head of function)
+  size_t component_size = sizeof(int32_t);
+
+  // Having a range of 0 is legal
+  if (info->is_range && (elems > 0)) {
+    /*
+     * Bit of ugliness here.  We're going to generate a mem copy loop
+     * on the register range, but it is possible that some regs
+     * in the range have been promoted.  This is unlikely, but
+     * before generating the copy, we'll just force a flush
+     * of any regs in the source range that have been promoted to
+     * home location.
+     */
+    for (int i = 0; i < elems; i++) {
+      RegLocation loc = UpdateLoc(info->args[i]);
+      if (loc.location == kLocPhysReg) {
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
+                      loc.low_reg, kWord);
+      }
+    }
+    /*
+     * TUNING note: generated code here could be much improved, but
+     * this is an uncommon operation and isn't especially performance
+     * critical.
+     */
+    int r_src = AllocTemp();
+    int r_dst = AllocTemp();
+    int r_idx = AllocTemp();
+    int r_val = INVALID_REG;
+    switch (cu_->instruction_set) {
+      case kThumb2:
+        r_val = TargetReg(kLr);
+        break;
+      case kX86:
+        FreeTemp(TargetReg(kRet0));
+        r_val = AllocTemp();
+        break;
+      case kMips:
+        r_val = AllocTemp();
+        break;
+      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
+    }
+    // Set up source pointer
+    RegLocation rl_first = info->args[0];
+    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
+    // Set up the target pointer
+    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
+                mirror::Array::DataOffset(component_size).Int32Value());
+    // Set up the loop counter (known to be > 0)
+    LoadConstant(r_idx, elems - 1);
+    // Generate the copy loop.  Going backwards for convenience
+    LIR* target = NewLIR0(kPseudoTargetLabel);
+    // Copy next element
+    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
+    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
+    FreeTemp(r_val);
+    OpDecAndBranch(kCondGe, r_idx, target);
+    if (cu_->instruction_set == kX86) {
+      // Restore the target pointer
+      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
+                  -mirror::Array::DataOffset(component_size).Int32Value());
+    }
+  } else if (!info->is_range) {
+    // TUNING: interleave
+    for (int i = 0; i < elems; i++) {
+      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
+      StoreBaseDisp(TargetReg(kRet0),
+                    mirror::Array::DataOffset(component_size).Int32Value() +
+                    i * 4, rl_arg.low_reg, kWord);
+      // If the LoadValue caused a temp to be allocated, free it
+      if (IsTemp(rl_arg.low_reg)) {
+        FreeTemp(rl_arg.low_reg);
+      }
+    }
+  }
+  if (info->result.location != kLocInvalid) {
+    StoreValue(info->result, GetReturn(false /* not fp */));
+  }
+}
+
+void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+                      bool is_object)
+{
+  int field_offset;
+  int ssb_index;
+  bool is_volatile;
+  bool is_referrers_class;
+  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+      is_referrers_class, is_volatile, true);
+  if (fast_path && !SLOW_FIELD_PATH) {
+    DCHECK_GE(field_offset, 0);
+    int rBase;
+    if (is_referrers_class) {
+      // Fast path, static storage base is this method's class
+      RegLocation rl_method  = LoadCurrMethod();
+      rBase = AllocTemp();
+      LoadWordDisp(rl_method.low_reg,
+                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
+      if (IsTemp(rl_method.low_reg)) {
+        FreeTemp(rl_method.low_reg);
+      }
+    } else {
+      // Medium path, static storage base in a different class which requires checks that the other
+      // class is initialized.
+      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
+      DCHECK_GE(ssb_index, 0);
+      // May do runtime call so everything to home locations.
+      FlushAllRegs();
+      // Using fixed register to sync with possible call to runtime support.
+      int r_method = TargetReg(kArg1);
+      LockTemp(r_method);
+      LoadCurrMethodDirect(r_method);
+      rBase = TargetReg(kArg0);
+      LockTemp(rBase);
+      LoadWordDisp(r_method,
+                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
+                   rBase);
+      LoadWordDisp(rBase,
+                   mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+                   sizeof(int32_t*) * ssb_index, rBase);
+      // rBase now points at appropriate static storage base (Class*)
+      // or NULL if not initialized. Check for NULL and call helper if NULL.
+      // TUNING: fast path should fall through
+      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
+      LoadConstant(TargetReg(kArg0), ssb_index);
+      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+      if (cu_->instruction_set == kMips) {
+        // For Arm, kRet0 = kArg0 = rBase; for Mips we need to copy.
+        OpRegCopy(rBase, TargetReg(kRet0));
+      }
+      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
+      branch_over->target = skip_target;
+      FreeTemp(r_method);
+    }
+    // rBase now holds static storage base
+    if (is_long_or_double) {
+      rl_src = LoadValueWide(rl_src, kAnyReg);
+    } else {
+      rl_src = LoadValue(rl_src, kAnyReg);
+    }
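+    // Volatile store protocol: a StoreStore barrier before the store orders it
+    // after earlier writes, and a StoreLoad barrier afterwards orders it before
+    // subsequent reads (the usual JMM volatile-store fence pairing).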
+    if (is_volatile) {
+      GenMemBarrier(kStoreStore);
+    }
+    if (is_long_or_double) {
+      StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
+                        rl_src.high_reg);
+    } else {
+      StoreWordDisp(rBase, field_offset, rl_src.low_reg);
+    }
+    if (is_volatile) {
+      GenMemBarrier(kStoreLoad);
+    }
+    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
+      MarkGCCard(rl_src.low_reg, rBase);
+    }
+    FreeTemp(rBase);
+  } else {
+    FlushAllRegs();  // Everything to home locations
+    int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
+        (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
+        : ENTRYPOINT_OFFSET(pSet32Static));
+    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
+  }
+}
+
+void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
+                      bool is_long_or_double, bool is_object)
+{
+  int field_offset;
+  int ssb_index;
+  bool is_volatile;
+  bool is_referrers_class;
+  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+      is_referrers_class, is_volatile, false);
+  if (fast_path && !SLOW_FIELD_PATH) {
+    DCHECK_GE(field_offset, 0);
+    int rBase;
+    if (is_referrers_class) {
+      // Fast path, static storage base is this method's class
+      RegLocation rl_method  = LoadCurrMethod();
+      rBase = AllocTemp();
+      LoadWordDisp(rl_method.low_reg,
+                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
+    } else {
+      // Medium path, static storage base in a different class which requires checks that the other
+      // class is initialized
+      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
+      DCHECK_GE(ssb_index, 0);
+      // May do runtime call so everything to home locations.
+      FlushAllRegs();
+      // Using fixed register to sync with possible call to runtime support.
+      int r_method = TargetReg(kArg1);
+      LockTemp(r_method);
+      LoadCurrMethodDirect(r_method);
+      rBase = TargetReg(kArg0);
+      LockTemp(rBase);
+      LoadWordDisp(r_method,
+                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
+                   rBase);
+      LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+                   sizeof(int32_t*) * ssb_index, rBase);
+      // rBase now points at appropriate static storage base (Class*)
+      // or NULL if not initialized. Check for NULL and call helper if NULL.
+      // TUNING: fast path should fall through
+      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
+      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+      if (cu_->instruction_set == kMips) {
+        // For Arm, kRet0 = kArg0 = rBase; for Mips we need to copy.
+        OpRegCopy(rBase, TargetReg(kRet0));
+      }
+      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
+      branch_over->target = skip_target;
+      FreeTemp(r_method);
+    }
+    // rBase now holds static storage base
+    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
+    if (is_volatile) {
+      GenMemBarrier(kLoadLoad);
+    }
+    if (is_long_or_double) {
+      LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
+                       rl_result.high_reg, INVALID_SREG);
+    } else {
+      LoadWordDisp(rBase, field_offset, rl_result.low_reg);
+    }
+    FreeTemp(rBase);
+    if (is_long_or_double) {
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      StoreValue(rl_dest, rl_result);
+    }
+  } else {
+    FlushAllRegs();  // Everything to home locations
+    int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
+        (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
+        : ENTRYPOINT_OFFSET(pGet32Static));
+    CallRuntimeHelperImm(getterOffset, field_idx, true);
+    if (is_long_or_double) {
+      RegLocation rl_result = GetReturnWide(rl_dest.fp);
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      RegLocation rl_result = GetReturn(rl_dest.fp);
+      StoreValue(rl_dest, rl_result);
+    }
+  }
+}
+
+void Mir2Lir::HandleSuspendLaunchPads()
+{
+  int num_elems = suspend_launchpads_.Size();
+  int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+  for (int i = 0; i < num_elems; i++) {
+    ResetRegPool();
+    ResetDefTracking();
+    LIR* lab = suspend_launchpads_.Get(i);
+    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
+    current_dalvik_offset_ = lab->operands[1];
+    AppendLIR(lab);
+    int r_tgt = CallHelperSetup(helper_offset);
+    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
+    OpUnconditionalBranch(resume_lab);
+  }
+}
+
+void Mir2Lir::HandleIntrinsicLaunchPads()
+{
+  int num_elems = intrinsic_launchpads_.Size();
+  for (int i = 0; i < num_elems; i++) {
+    ResetRegPool();
+    ResetDefTracking();
+    LIR* lab = intrinsic_launchpads_.Get(i);
+    CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
+    current_dalvik_offset_ = info->offset;
+    AppendLIR(lab);
+    // NOTE: GenInvoke handles MarkSafepointPC
+    GenInvoke(info);
+    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
+    if (resume_lab != NULL) {
+      OpUnconditionalBranch(resume_lab);
+    }
+  }
+}
+
+void Mir2Lir::HandleThrowLaunchPads()
+{
+  int num_elems = throw_launchpads_.Size();
+  for (int i = 0; i < num_elems; i++) {
+    ResetRegPool();
+    ResetDefTracking();
+    LIR* lab = throw_launchpads_.Get(i);
+    current_dalvik_offset_ = lab->operands[1];
+    AppendLIR(lab);
+    int func_offset = 0;
+    int v1 = lab->operands[2];
+    int v2 = lab->operands[3];
+    bool target_x86 = (cu_->instruction_set == kX86);
+    switch (lab->operands[0]) {
+      case kThrowNullPointer:
+        func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+        break;
+      case kThrowConstantArrayBounds:  // v1 is the length reg (array pointer on x86), v2 the constant index
+        // Arm/Mips already have the length in v1; x86 leaves the array pointer
+        // in v1, so reload the length from it for the handler.
+        if (target_x86) {
+          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
+        } else {
+          OpRegCopy(TargetReg(kArg1), v1);
+        }
+        // Make sure the following LoadConstant doesn't mess with kArg1.
+        LockTemp(TargetReg(kArg1));
+        LoadConstant(TargetReg(kArg0), v2);
+        func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+        break;
+      case kThrowArrayBounds:
+        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
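+        // Take care not to clobber a source before it is read: the branches
+        // below handle v2 already being in kArg0 and v1 already being in
+        // kArg1, using kArg2 as scratch for the full swap.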
+        if (v2 != TargetReg(kArg0)) {
+          OpRegCopy(TargetReg(kArg0), v1);
+          if (target_x86) {
+            // x86 leaves the array pointer in v2, so load the array length that the handler expects
+            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+          } else {
+            OpRegCopy(TargetReg(kArg1), v2);
+          }
+        } else {
+          if (v1 == TargetReg(kArg1)) {
+            // Swap v1 and v2, using kArg2 as a temp
+            OpRegCopy(TargetReg(kArg2), v1);
+            if (target_x86) {
+              // x86 leaves the array pointer in v2; load the array length that the handler expects
+              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+            } else {
+              OpRegCopy(TargetReg(kArg1), v2);
+            }
+            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+          } else {
+            if (target_x86) {
+              // x86 leaves the array pointer in v2; load the array length that the handler expects
+              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+            } else {
+              OpRegCopy(TargetReg(kArg1), v2);
+            }
+            OpRegCopy(TargetReg(kArg0), v1);
+          }
+        }
+        func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+        break;
+      case kThrowDivZero:
+        func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+        break;
+      case kThrowNoSuchMethod:
+        OpRegCopy(TargetReg(kArg0), v1);
+        func_offset =
+          ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+        break;
+      case kThrowStackOverflow:
+        func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+        // Restore stack alignment
+        if (target_x86) {
+          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
+        } else {
+          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
+        }
+        break;
+      default:
+        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
+    }
+    ClobberCalleeSave();
+    int r_tgt = CallHelperSetup(func_offset);
+    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
+  }
+}
+
+void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
+                      bool is_object)
+{
+  int field_offset;
+  bool is_volatile;
+
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+
+  if (fast_path && !SLOW_FIELD_PATH) {
+    RegLocation rl_result;
+    RegisterClass reg_class = oat_reg_class_by_size(size);
+    DCHECK_GE(field_offset, 0);
+    rl_obj = LoadValue(rl_obj, kCoreReg);
+    if (is_long_or_double) {
+      DCHECK(rl_dest.wide);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      if (cu_->instruction_set == kX86) {
+        rl_result = EvalLoc(rl_dest, reg_class, true);
+        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
+                         rl_result.high_reg, rl_obj.s_reg_low);
+        if (is_volatile) {
+          GenMemBarrier(kLoadLoad);
+        }
+      } else {
+        int reg_ptr = AllocTemp();
+        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+        rl_result = EvalLoc(rl_dest, reg_class, true);
+        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+        if (is_volatile) {
+          GenMemBarrier(kLoadLoad);
+        }
+        FreeTemp(reg_ptr);
+      }
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      rl_result = EvalLoc(rl_dest, reg_class, true);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
+                   kWord, rl_obj.s_reg_low);
+      if (is_volatile) {
+        GenMemBarrier(kLoadLoad);
+      }
+      StoreValue(rl_dest, rl_result);
+    }
+  } else {
+    int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
+        (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
+        : ENTRYPOINT_OFFSET(pGet32Instance));
+    CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
+    if (is_long_or_double) {
+      RegLocation rl_result = GetReturnWide(rl_dest.fp);
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      RegLocation rl_result = GetReturn(rl_dest.fp);
+      StoreValue(rl_dest, rl_result);
+    }
+  }
+}
+
+void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
+                      bool is_object)
+{
+  int field_offset;
+  bool is_volatile;
+
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile,
+                 true);
+  if (fast_path && !SLOW_FIELD_PATH) {
+    RegisterClass reg_class = oat_reg_class_by_size(size);
+    DCHECK_GE(field_offset, 0);
+    rl_obj = LoadValue(rl_obj, kCoreReg);
+    if (is_long_or_double) {
+      int reg_ptr;
+      rl_src = LoadValueWide(rl_src, kAnyReg);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      reg_ptr = AllocTemp();
+      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+      if (is_volatile) {
+        GenMemBarrier(kStoreStore);
+      }
+      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+      if (is_volatile) {
+        GenMemBarrier(kLoadLoad);
+      }
+      FreeTemp(reg_ptr);
+    } else {
+      rl_src = LoadValue(rl_src, reg_class);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      if (is_volatile) {
+        GenMemBarrier(kStoreStore);
+      }
+      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
+      if (is_volatile) {
+        GenMemBarrier(kLoadLoad);
+      }
+      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
+        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
+      }
+    }
+  } else {
+    int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
+        (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
+        : ENTRYPOINT_OFFSET(pSet32Instance));
+    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
+  }
+}
+
+void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest)
+{
+  RegLocation rl_method = LoadCurrMethod();
+  int res_reg = AllocTemp();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
+                                                   *cu_->dex_file,
+                                                   type_idx)) {
+    // Call out to helper which resolves type and verifies access.
+    // Resolved type returned in kRet0.
+    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+                            type_idx, rl_method.low_reg, true);
+    RegLocation rl_result = GetReturn(false);
+    StoreValue(rl_dest, rl_result);
+  } else {
+    // We don't need access checks; load the type from the dex cache.
+    int32_t dex_cache_offset =
+        mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
+    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
+    int32_t offset_of_type =
+        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
+                          * type_idx);
+    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
+    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
+        type_idx) || SLOW_TYPE_PATH) {
+      // Slow path, at runtime test if type is null and if so initialize
+      FlushAllRegs();
+      LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
+      // Resolved, store and hop over following code
+      StoreValue(rl_dest, rl_result);
+      /*
+       * Because we have stores of the target value on two paths,
+       * clobber temp tracking for the destination using the ssa name
+       */
+      ClobberSReg(rl_dest.s_reg_low);
+      LIR* branch2 = OpUnconditionalBranch(0);
+      // TUNING: move slow path to end & remove unconditional branch
+      LIR* target1 = NewLIR0(kPseudoTargetLabel);
+      // Call out to helper, which will return resolved type in kArg0
+      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+                              rl_method.low_reg, true);
+      RegLocation rl_result = GetReturn(false);
+      StoreValue(rl_dest, rl_result);
+      /*
+       * Because we have stores of the target value on two paths,
+       * clobber temp tracking for the destination using the ssa name
+       */
+      ClobberSReg(rl_dest.s_reg_low);
+      // Rejoin code paths
+      LIR* target2 = NewLIR0(kPseudoTargetLabel);
+      branch1->target = target1;
+      branch2->target = target2;
+    } else {
+      // Fast path, we're done - just store result
+      StoreValue(rl_dest, rl_result);
+    }
+  }
+}
+
+void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest)
+{
+  /* NOTE: Most strings should be available at compile time */
+  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
+                 (sizeof(mirror::String*) * string_idx);
+  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
+      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
+    // slow path, resolve string if not in dex cache
+    FlushAllRegs();
+    LockCallTemps(); // Using explicit registers
+    LoadCurrMethodDirect(TargetReg(kArg2));
+    LoadWordDisp(TargetReg(kArg2),
+                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
+    // Might call out to helper, which will return resolved string in kRet0
+    int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
+    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
+    LoadConstant(TargetReg(kArg1), string_idx);
+    if (cu_->instruction_set == kThumb2) {
+      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
+      GenBarrier();
+      // When EXERCISE_SLOWEST_STRING_PATH is set, skip the IT below so the
+      // helper is always taken (testing aid).
+      if (!EXERCISE_SLOWEST_STRING_PATH) {
+        OpIT(kCondEq, "T");
+      }
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
+      LIR* call_inst = OpReg(kOpBlx, r_tgt);    // .eq, helper(Method*, string_idx)
+      MarkSafepointPC(call_inst);
+      FreeTemp(r_tgt);
+    } else if (cu_->instruction_set == kMips) {
+      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
+      LIR* call_inst = OpReg(kOpBlx, r_tgt);
+      MarkSafepointPC(call_inst);
+      FreeTemp(r_tgt);
+      LIR* target = NewLIR0(kPseudoTargetLabel);
+      branch->target = target;
+    } else {
+      DCHECK_EQ(cu_->instruction_set, kX86);
+      CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
+    }
+    GenBarrier();
+    StoreValue(rl_dest, GetReturn(false));
+  } else {
+    RegLocation rl_method = LoadCurrMethod();
+    int res_reg = AllocTemp();
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    LoadWordDisp(rl_method.low_reg,
+                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * Let helper function take care of everything.  Will
+ * call Class::NewInstanceFromCode(type_idx, method);
+ */
+void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest)
+{
+  FlushAllRegs();  /* Everything to home location */
+  // alloc will always check for resolution, do we also need to verify
+  // access because the verifier was unable to?
+  int func_offset;
+  if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
+      cu_->method_idx, *cu_->dex_file, type_idx)) {
+    func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+  } else {
+    func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+  }
+  CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+  RegLocation rl_result = GetReturn(false);
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mir2Lir::GenThrow(RegLocation rl_src)
+{
+  FlushAllRegs();
+  CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
+}
+
+// For final classes there are no sub-classes to check and so we can answer the instance-of
+// question with simple comparisons.
+void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
+                                 RegLocation rl_src) {
+  RegLocation object = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int result_reg = rl_result.low_reg;
+  if (result_reg == object.low_reg) {
+    result_reg = AllocTypedTemp(false, kCoreReg);
+  }
+  LoadConstant(result_reg, 0);     // assume false
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
+
+  int check_class = AllocTypedTemp(false, kCoreReg);
+  int object_class = AllocTypedTemp(false, kCoreReg);
+
+  LoadCurrMethodDirect(check_class);
+  if (use_declaring_class) {
+    LoadWordDisp(check_class, mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
+                 check_class);
+    LoadWordDisp(object.low_reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
+  } else {
+    LoadWordDisp(check_class, mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(),
+                 check_class);
+    LoadWordDisp(object.low_reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
+    int32_t offset_of_type =
+      mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
+      (sizeof(mirror::Class*) * type_idx);
+    LoadWordDisp(check_class, offset_of_type, check_class);
+  }
+
+  LIR* ne_branchover = NULL;
+  if (cu_->instruction_set == kThumb2) {
+    OpRegReg(kOpCmp, check_class, object_class);  // Same?
+    OpIT(kCondEq, "");   // if-convert the test
+    LoadConstant(result_reg, 1);     // .eq case - load true
+  } else {
+    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
+    LoadConstant(result_reg, 1);     // eq case - load true
+  }
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  null_branchover->target = target;
+  if (ne_branchover != NULL) {
+    ne_branchover->target = target;
+  }
+  FreeTemp(object_class);
+  FreeTemp(check_class);
+  if (IsTemp(result_reg)) {
+    OpRegCopy(rl_result.low_reg, result_reg);
+    FreeTemp(result_reg);
+  }
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+                                         bool type_known_abstract, bool use_declaring_class,
+                                         bool can_assume_type_is_in_dex_cache,
+                                         uint32_t type_idx, RegLocation rl_dest,
+                                         RegLocation rl_src) {
+  FlushAllRegs();
+  // May generate a call - use explicit registers
+  LockCallTemps();
+  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
+  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
+  if (needs_access_check) {
+    // Check we have access to type_idx and if not throw IllegalAccessError,
+    // returns Class* in kArg0
+    CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+                         type_idx, true);
+    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
+  } else if (use_declaring_class) {
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
+    LoadWordDisp(TargetReg(kArg1),
+                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
+  } else {
+    // Load dex cache entry into class_reg (kArg2)
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
+    LoadWordDisp(TargetReg(kArg1),
+                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+    int32_t offset_of_type =
+        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
+        * type_idx);
+    LoadWordDisp(class_reg, offset_of_type, class_reg);
+    if (!can_assume_type_is_in_dex_cache) {
+      // Need to test presence of type in dex cache at runtime
+      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
+      // Not resolved
+      // Call out to helper, which will return resolved type in kRet0
+      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
+      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
+      // Rejoin code paths
+      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
+      hop_branch->target = hop_target;
+    }
+  }
+  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
+  RegLocation rl_result = GetReturn(false);
+  if (cu_->instruction_set == kMips) {
+    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
+    LoadConstant(rl_result.low_reg, 0);
+  }
+  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
+
+  /* load object->klass_ */
+  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+  LoadWordDisp(TargetReg(kArg0),  mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
+  LIR* branchover = NULL;
+  if (type_known_final) {
+    // rl_result == ref == null == 0.
+    if (cu_->instruction_set == kThumb2) {
+      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
+      OpIT(kCondEq, "E");   // if-convert the test
+      LoadConstant(rl_result.low_reg, 1);     // .eq case - load true
+      LoadConstant(rl_result.low_reg, 0);     // .ne case - load false
+    } else {
+      LoadConstant(rl_result.low_reg, 0);     // ne case - load false
+      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
+      LoadConstant(rl_result.low_reg, 1);     // eq case - load true
+    }
+  } else {
+    if (cu_->instruction_set == kThumb2) {
+      int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+      if (!type_known_abstract) {
+      /* Uses conditional nullification */
+        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
+        OpIT(kCondEq, "EE");   // if-convert the test
+        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
+      }
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
+      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
+      FreeTemp(r_tgt);
+    } else {
+      if (!type_known_abstract) {
+        /* Uses branchovers */
+        LoadConstant(rl_result.low_reg, 1);     // assume true
+        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+      }
+      if (cu_->instruction_set != kX86) {
+        int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
+        OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
+        FreeTemp(r_tgt);
+      } else {
+        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+        OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+      }
+    }
+  }
+  // TODO: only clobber when type isn't final?
+  ClobberCalleeSave();
+  /* branch targets here */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  StoreValue(rl_dest, rl_result);
+  branch1->target = target;
+  if (branchover != NULL) {
+    branchover->target = target;
+  }
+}
+
+void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
+  bool type_known_final, type_known_abstract, use_declaring_class;
+  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
+                                                                              *cu_->dex_file,
+                                                                              type_idx,
+                                                                              &type_known_final,
+                                                                              &type_known_abstract,
+                                                                              &use_declaring_class);
+  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
+      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
+
+  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
+    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
+  } else {
+    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
+                               use_declaring_class, can_assume_type_is_in_dex_cache,
+                               type_idx, rl_dest, rl_src);
+  }
+}
+
+void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src)
+{
+  bool type_known_final, type_known_abstract, use_declaring_class;
+  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
+                                                                              *cu_->dex_file,
+                                                                              type_idx,
+                                                                              &type_known_final,
+                                                                              &type_known_abstract,
+                                                                              &use_declaring_class);
+  // Note: currently type_known_final is unused, as optimizing will only improve the performance
+  // of the exception throw path.
+  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
+  const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
+  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
+    // Verifier type analysis proved this check cast would never cause an exception.
+    return;
+  }
+  FlushAllRegs();
+  // May generate a call - use explicit registers
+  LockCallTemps();
+  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
+  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
+  if (needs_access_check) {
+    // Check we have access to type_idx and if not throw IllegalAccessError,
+    // returns Class* in kRet0
+    // InitializeTypeAndVerifyAccess(idx, method)
+    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+                            type_idx, TargetReg(kArg1), true);
+    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
+  } else if (use_declaring_class) {
+    LoadWordDisp(TargetReg(kArg1),
+                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
+  } else {
+    // Load dex cache entry into class_reg (kArg2)
+    LoadWordDisp(TargetReg(kArg1),
+                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+    int32_t offset_of_type =
+        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
+        (sizeof(mirror::Class*) * type_idx);
+    LoadWordDisp(class_reg, offset_of_type, class_reg);
+    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
+      // Need to test presence of type in dex cache at runtime
+      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
+      // Not resolved
+      // Call out to helper, which will return resolved type in kArg0
+      // InitializeTypeFromCode(idx, method)
+      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
+                              true);
+      OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
+      // Rejoin code paths
+      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
+      hop_branch->target = hop_target;
+    }
+  }
+  // At this point, class_reg (kArg2) has class
+  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
+  /* Null is OK - continue */
+  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
+  /* load object->klass_ */
+  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+  /* kArg1 now contains object->klass_ */
+  LIR* branch2 = NULL;
+  if (!type_known_abstract) {
+    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
+  }
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2),
+                          true);
+  /* branch target here */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch1->target = target;
+  if (branch2 != NULL) {
+    branch2->target = target;
+  }
+}
+
+void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
+{
+  RegLocation rl_result;
+  if (cu_->instruction_set == kThumb2) {
+    /*
+     * NOTE:  This is the one place in the code in which we might have
+     * as many as six live temporary registers.  There are 5 in the normal
+     * set for Arm.  Until we have spill capabilities, temporarily add
+     * lr to the temp set.  It is safe to do this locally, but note that
+     * lr is used explicitly elsewhere in the code generator and cannot
+     * normally be used as a general temp register.
+     */
+    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
+    FreeTemp(TargetReg(kLr));   // and make it available
+  }
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  // The longs may overlap - use intermediate temp if so
+  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
+    int t_reg = AllocTemp();
+    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+    OpRegCopy(rl_result.low_reg, t_reg);
+    FreeTemp(t_reg);
+  } else {
+    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
+                rl_src2.high_reg);
+  }
+  /*
+   * NOTE: If rl_dest refers to a frame variable in a large frame, the
+   * following StoreValueWide might need to allocate a temp register.
+   * To further work around the lack of a spill capability, explicitly
+   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
+   * Remove when spill is functional.
+   */
+  FreeRegLocTemps(rl_result, rl_src1);
+  FreeRegLocTemps(rl_result, rl_src2);
+  StoreValueWide(rl_dest, rl_result);
+  if (cu_->instruction_set == kThumb2) {
+    Clobber(TargetReg(kLr));
+    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
+  }
+}
+
+
+void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_shift)
+{
+  int func_offset = -1; // Make gcc happy
+
+  switch (opcode) {
+    case Instruction::SHL_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+      func_offset = ENTRYPOINT_OFFSET(pShlLong);
+      break;
+    case Instruction::SHR_LONG:
+    case Instruction::SHR_LONG_2ADDR:
+      func_offset = ENTRYPOINT_OFFSET(pShrLong);
+      break;
+    case Instruction::USHR_LONG:
+    case Instruction::USHR_LONG_2ADDR:
+      func_offset = ENTRYPOINT_OFFSET(pUshrLong);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected case";
+  }
+  FlushAllRegs();   /* Send everything to home location */
+  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
+  RegLocation rl_result = GetReturnWide(false);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+
+void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
+                            RegLocation rl_src1, RegLocation rl_src2)
+{
+  OpKind op = kOpBkpt;
+  bool is_div_rem = false;
+  bool check_zero = false;
+  bool unary = false;
+  RegLocation rl_result;
+  bool shift_op = false;
+  switch (opcode) {
+    case Instruction::NEG_INT:
+      op = kOpNeg;
+      unary = true;
+      break;
+    case Instruction::NOT_INT:
+      op = kOpMvn;
+      unary = true;
+      break;
+    case Instruction::ADD_INT:
+    case Instruction::ADD_INT_2ADDR:
+      op = kOpAdd;
+      break;
+    case Instruction::SUB_INT:
+    case Instruction::SUB_INT_2ADDR:
+      op = kOpSub;
+      break;
+    case Instruction::MUL_INT:
+    case Instruction::MUL_INT_2ADDR:
+      op = kOpMul;
+      break;
+    case Instruction::DIV_INT:
+    case Instruction::DIV_INT_2ADDR:
+      check_zero = true;
+      op = kOpDiv;
+      is_div_rem = true;
+      break;
+    /* NOTE: returns in kArg1 */
+    case Instruction::REM_INT:
+    case Instruction::REM_INT_2ADDR:
+      check_zero = true;
+      op = kOpRem;
+      is_div_rem = true;
+      break;
+    case Instruction::AND_INT:
+    case Instruction::AND_INT_2ADDR:
+      op = kOpAnd;
+      break;
+    case Instruction::OR_INT:
+    case Instruction::OR_INT_2ADDR:
+      op = kOpOr;
+      break;
+    case Instruction::XOR_INT:
+    case Instruction::XOR_INT_2ADDR:
+      op = kOpXor;
+      break;
+    case Instruction::SHL_INT:
+    case Instruction::SHL_INT_2ADDR:
+      shift_op = true;
+      op = kOpLsl;
+      break;
+    case Instruction::SHR_INT:
+    case Instruction::SHR_INT_2ADDR:
+      shift_op = true;
+      op = kOpAsr;
+      break;
+    case Instruction::USHR_INT:
+    case Instruction::USHR_INT_2ADDR:
+      shift_op = true;
+      op = kOpLsr;
+      break;
+    default:
+      LOG(FATAL) << "Invalid word arith op: " << opcode;
+  }
+  if (!is_div_rem) {
+    if (unary) {
+      rl_src1 = LoadValue(rl_src1, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
+    } else {
+      if (shift_op) {
+        int t_reg = INVALID_REG;
+        if (cu_->instruction_set == kX86) {
+          // X86 doesn't require masking and must use ECX
+          t_reg = TargetReg(kCount);  // rCX
+          LoadValueDirectFixed(rl_src2, t_reg);
+        } else {
+          rl_src2 = LoadValue(rl_src2, kCoreReg);
+          t_reg = AllocTemp();
+          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
+        }
+        rl_src1 = LoadValue(rl_src1, kCoreReg);
+        rl_result = EvalLoc(rl_dest, kCoreReg, true);
+        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
+        FreeTemp(t_reg);
+      } else {
+        rl_src1 = LoadValue(rl_src1, kCoreReg);
+        rl_src2 = LoadValue(rl_src2, kCoreReg);
+        rl_result = EvalLoc(rl_dest, kCoreReg, true);
+        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+      }
+    }
+    StoreValue(rl_dest, rl_result);
+  } else {
+    if (cu_->instruction_set == kMips) {
+      rl_src1 = LoadValue(rl_src1, kCoreReg);
+      rl_src2 = LoadValue(rl_src2, kCoreReg);
+      if (check_zero) {
+          GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+      }
+      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+    } else {
+      int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+      FlushAllRegs();   /* Send everything to home location */
+      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
+      int r_tgt = CallHelperSetup(func_offset);
+      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
+      if (check_zero) {
+        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
+      }
+      // NOTE: callout here is not a safepoint
+      CallHelper(r_tgt, func_offset, false /* not a safepoint */ );
+      if (op == kOpDiv)
+        rl_result = GetReturn(false);
+      else
+        rl_result = GetReturnAlt();
+    }
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * The following are the first-level codegen routines that analyze the format
+ * of each bytecode then either dispatch special purpose codegen routines
+ * or produce corresponding Thumb instructions directly.
+ */
+
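+// Returns true if no more than one bit is set in 'x' (also true for x == 0;
+// callers below only pass values >= 2).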
+static bool IsPowerOfTwo(int x)
+{
+  return (x & (x - 1)) == 0;
+}
+
+// Returns true if no more than two bits are set in 'x'.
+static bool IsPopCountLE2(unsigned int x)
+{
+  x &= x - 1;
+  return (x & (x - 1)) == 0;
+}
+
+// Returns the index of the lowest set bit in 'x'.
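+// Assumes x != 0; neither loop below terminates for 0.  Callers here guard
+// against that (lit >= 2, or a value with at least one bit set).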
+static int LowestSetBit(unsigned int x) {
+  int bit_posn = 0;
+  while ((x & 0xf) == 0) {
+    bit_posn += 4;
+    x >>= 4;
+  }
+  while ((x & 1) == 0) {
+    bit_posn++;
+    x >>= 1;
+  }
+  return bit_posn;
+}
+
+// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
+bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode,
+                               RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
+    return false;
+  }
+  // No divide instruction for Arm, so check for more special cases
+  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
+    return SmallLiteralDivide(dalvik_opcode, rl_src, rl_dest, lit);
+  }
+  int k = LowestSetBit(lit);
+  if (k >= 30) {
+    // Avoid special cases.
+    return false;
+  }
+  bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
+      dalvik_opcode == Instruction::DIV_INT_LIT16);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
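+  // Signed divide/remainder by 2^k via a bias: lit - 1 is added to negative
+  // dividends so that the arithmetic shift rounds toward zero as required.
+  //   div: t = (src < 0) ? src + (lit - 1) : src;  result = t >> k
+  //   rem: bias = (src < 0) ? lit - 1 : 0;  result = ((src + bias) & (lit - 1)) - bias
+  // e.g. src = -7, lit = 4: div -> (-7 + 3) >> 2 = -1; rem -> ((-4) & 3) - 3 = -3.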
+  if (div) {
+    int t_reg = AllocTemp();
+    if (lit == 2) {
+      // Division by 2 is by far the most common division by constant.
+      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
+      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
+    } else {
+      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
+      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
+      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
+    }
+  } else {
+    int t_reg1 = AllocTemp();
+    int t_reg2 = AllocTemp();
+    if (lit == 2) {
+      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
+      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+    } else {
+      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
+      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
+      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+    }
+  }
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
+bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+  // Can we simplify this multiplication?
+  bool power_of_two = false;
+  bool pop_count_le2 = false;
+  bool power_of_two_minus_one = false;
+  if (lit < 2) {
+    // Avoid special cases.
+    return false;
+  } else if (IsPowerOfTwo(lit)) {
+    power_of_two = true;
+  } else if (IsPopCountLE2(lit)) {
+    pop_count_le2 = true;
+  } else if (IsPowerOfTwo(lit + 1)) {
+    power_of_two_minus_one = true;
+  } else {
+    return false;
+  }
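+  // Strength-reduce the multiply; decompositions handled below (exact
+  // instruction sequences are target-specific):
+  //   lit = 8          -> src << 3
+  //   lit = 10 (8 + 2) -> (src << 3) + (src << 1)
+  //   lit = 7  (8 - 1) -> (src << 3) - src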
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (power_of_two) {
+    // Shift.
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
+  } else if (pop_count_le2) {
+    // Shift and add and shift.
+    int first_bit = LowestSetBit(lit);
+    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
+    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
+  } else {
+    // Reverse subtract: (src << log2(lit + 1)) - src, valid because lit + 1 is a power of two.
+    DCHECK(power_of_two_minus_one);
+    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
+    int t_reg = AllocTemp();
+    OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
+    OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+  }
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
+                               int lit)
+{
+  RegLocation rl_result;
+  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
+  bool shift_op = false;
+  bool is_div = false;
+
+  switch (opcode) {
+    case Instruction::RSUB_INT_LIT8:
+    case Instruction::RSUB_INT: {
+      rl_src = LoadValue(rl_src, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      if (cu_->instruction_set == kThumb2) {
+        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
+      } else {
+        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
+        OpRegImm(kOpAdd, rl_result.low_reg, lit);
+      }
+      StoreValue(rl_dest, rl_result);
+      return;
+    }
+
+    case Instruction::SUB_INT:
+    case Instruction::SUB_INT_2ADDR:
+      lit = -lit;
+      // Intended fallthrough
+    case Instruction::ADD_INT:
+    case Instruction::ADD_INT_2ADDR:
+    case Instruction::ADD_INT_LIT8:
+    case Instruction::ADD_INT_LIT16:
+      op = kOpAdd;
+      break;
+    case Instruction::MUL_INT:
+    case Instruction::MUL_INT_2ADDR:
+    case Instruction::MUL_INT_LIT8:
+    case Instruction::MUL_INT_LIT16: {
+      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
+        return;
+      }
+      op = kOpMul;
+      break;
+    }
+    case Instruction::AND_INT:
+    case Instruction::AND_INT_2ADDR:
+    case Instruction::AND_INT_LIT8:
+    case Instruction::AND_INT_LIT16:
+      op = kOpAnd;
+      break;
+    case Instruction::OR_INT:
+    case Instruction::OR_INT_2ADDR:
+    case Instruction::OR_INT_LIT8:
+    case Instruction::OR_INT_LIT16:
+      op = kOpOr;
+      break;
+    case Instruction::XOR_INT:
+    case Instruction::XOR_INT_2ADDR:
+    case Instruction::XOR_INT_LIT8:
+    case Instruction::XOR_INT_LIT16:
+      op = kOpXor;
+      break;
+    case Instruction::SHL_INT_LIT8:
+    case Instruction::SHL_INT:
+    case Instruction::SHL_INT_2ADDR:
+      lit &= 31;
+      shift_op = true;
+      op = kOpLsl;
+      break;
+    case Instruction::SHR_INT_LIT8:
+    case Instruction::SHR_INT:
+    case Instruction::SHR_INT_2ADDR:
+      lit &= 31;
+      shift_op = true;
+      op = kOpAsr;
+      break;
+    case Instruction::USHR_INT_LIT8:
+    case Instruction::USHR_INT:
+    case Instruction::USHR_INT_2ADDR:
+      lit &= 31;
+      shift_op = true;
+      op = kOpLsr;
+      break;
+
+    case Instruction::DIV_INT:
+    case Instruction::DIV_INT_2ADDR:
+    case Instruction::DIV_INT_LIT8:
+    case Instruction::DIV_INT_LIT16:
+    case Instruction::REM_INT:
+    case Instruction::REM_INT_2ADDR:
+    case Instruction::REM_INT_LIT8:
+    case Instruction::REM_INT_LIT16: {
+      if (lit == 0) {
+        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
+        return;
+      }
+      if (HandleEasyDivide(opcode, rl_src, rl_dest, lit)) {
+        return;
+      }
+      if ((opcode == Instruction::DIV_INT_LIT8) ||
+          (opcode == Instruction::DIV_INT) ||
+          (opcode == Instruction::DIV_INT_2ADDR) ||
+          (opcode == Instruction::DIV_INT_LIT16)) {
+        is_div = true;
+      } else {
+        is_div = false;
+      }
+      if (cu_->instruction_set == kMips) {
+        rl_src = LoadValue(rl_src, kCoreReg);
+        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
+      } else {
+        FlushAllRegs();   /* Everything to home location */
+        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+        Clobber(TargetReg(kArg0));
+        int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
+        if (is_div)
+          rl_result = GetReturn(false);
+        else
+          rl_result = GetReturnAlt();
+      }
+      StoreValue(rl_dest, rl_result);
+      return;
+    }
+    default:
+      LOG(FATAL) << "Unexpected opcode " << opcode;
+  }
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy
+  if (shift_op && (lit == 0)) {
+    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  } else {
+    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
+  }
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_src2)
+{
+  RegLocation rl_result;
+  OpKind first_op = kOpBkpt;
+  OpKind second_op = kOpBkpt;
+  bool call_out = false;
+  bool check_zero = false;
+  int func_offset;
+  int ret_reg = TargetReg(kRet0);
+
+  switch (opcode) {
+    case Instruction::NOT_LONG:
+      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      // Check for destructive overlap
+      if (rl_result.low_reg == rl_src2.high_reg) {
+        int t_reg = AllocTemp();
+        OpRegCopy(t_reg, rl_src2.high_reg);
+        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
+        FreeTemp(t_reg);
+      } else {
+        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
+      }
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      if (cu_->instruction_set != kThumb2) {
+        GenAddLong(rl_dest, rl_src1, rl_src2);
+        return;
+      }
+      first_op = kOpAdd;
+      second_op = kOpAdc;
+      break;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      if (cu_->instruction_set != kThumb2) {
+        GenSubLong(rl_dest, rl_src1, rl_src2);
+        return;
+      }
+      first_op = kOpSub;
+      second_op = kOpSbc;
+      break;
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      if (cu_->instruction_set == kThumb2) {
+        GenMulLong(rl_dest, rl_src1, rl_src2);
+        return;
+      } else {
+        call_out = true;
+        ret_reg = TargetReg(kRet0);
+        func_offset = ENTRYPOINT_OFFSET(pLmul);
+      }
+      break;
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+      call_out = true;
+      check_zero = true;
+      ret_reg = TargetReg(kRet0);
+      func_offset = ENTRYPOINT_OFFSET(pLdiv);
+      break;
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+      call_out = true;
+      check_zero = true;
+      func_offset = ENTRYPOINT_OFFSET(pLdivmod);
+      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
+      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
+      break;
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::AND_LONG:
+      if (cu_->instruction_set == kX86) {
+        return GenAndLong(rl_dest, rl_src1, rl_src2);
+      }
+      first_op = kOpAnd;
+      second_op = kOpAnd;
+      break;
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      if (cu_->instruction_set == kX86) {
+        GenOrLong(rl_dest, rl_src1, rl_src2);
+        return;
+      }
+      first_op = kOpOr;
+      second_op = kOpOr;
+      break;
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      if (cu_->instruction_set == kX86) {
+        GenXorLong(rl_dest, rl_src1, rl_src2);
+        return;
+      }
+      first_op = kOpXor;
+      second_op = kOpXor;
+      break;
+    case Instruction::NEG_LONG: {
+      GenNegLong(rl_dest, rl_src2);
+      return;
+    }
+    default:
+      LOG(FATAL) << "Invalid long arith op";
+  }
+  if (!call_out) {
+    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
+  } else {
+    FlushAllRegs();   /* Send everything to home location */
+    if (check_zero) {
+      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
+      int r_tgt = CallHelperSetup(func_offset);
+      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
+      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
+      // NOTE: callout here is not a safepoint
+      CallHelper(r_tgt, func_offset, false /* not safepoint */);
+    } else {
+      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+    }
+  // Adjust return regs to handle the case of rem returning kArg2/kArg3
+    if (ret_reg == TargetReg(kRet0))
+      rl_result = GetReturnWide(false);
+    else
+      rl_result = GetReturnWideAlt();
+    StoreValueWide(rl_dest, rl_result);
+  }
+}
+
+void Mir2Lir::GenConversionCall(int func_offset,
+                                RegLocation rl_dest, RegLocation rl_src)
+{
+  /*
+   * Don't optimize the register usage since it calls out to support
+   * functions
+   */
+  FlushAllRegs();   /* Send everything to home location */
+  if (rl_src.wide) {
+    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+                             rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+  } else {
+    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+  }
+  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
+  if (rl_dest.wide) {
+    RegLocation rl_result;
+    rl_result = GetReturnWide(rl_dest.fp);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    RegLocation rl_result;
+    rl_result = GetReturn(rl_dest.fp);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/* Check if we need to check for pending suspend request */
+void Mir2Lir::GenSuspendTest(int opt_flags)
+{
+  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+    return;
+  }
+  FlushAllRegs();
+  LIR* branch = OpTestSuspend(NULL);
+  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
+  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
+                       reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
+  branch->target = target;
+  suspend_launchpads_.Insert(target);
+}
+
+/* Check if we need to check for pending suspend request */
+void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target)
+{
+  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+    OpUnconditionalBranch(target);
+    return;
+  }
+  OpTestSuspend(target);
+  LIR* launch_pad =
+      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
+             reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
+  FlushAllRegs();
+  OpUnconditionalBranch(launch_pad);
+  suspend_launchpads_.Insert(launch_pad);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
new file mode 100644
index 0000000..e3993e0
--- /dev/null
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -0,0 +1,1476 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_ir.h"
+#include "dex_file-inl.h"
+#include "invoke_type.h"
+#include "mirror/array.h"
+#include "mirror/string.h"
+#include "mir_to_lir-inl.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "x86/codegen_x86.h"
+
+namespace art {
+
+/*
+ * This source file contains "gen" codegen routines that should
+ * be applicable to most targets.  Only mid-level support utilities
+ * and "op" calls may be used here.
+ */
+
+/*
+ * To save scheduling time, helper calls are broken into two parts: generation of
+ * the helper target address, and the actual call to the helper.  Because x86
+ * has a memory call operation, part 1 is a NOP for x86.  For other targets,
+ * load arguments between the two parts.
+ */
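+/*
+ * Illustrative call-site shape (a sketch only; the exact argument marshalling
+ * varies per helper - see the CallRuntimeHelper* wrappers below):
+ *
+ *   int r_tgt = CallHelperSetup(helper_offset);      // part 1: NOP on x86
+ *   LoadConstant(TargetReg(kArg0), arg0);            // load args in between
+ *   ClobberCalleeSave();
+ *   CallHelper(r_tgt, helper_offset, safepoint_pc);  // part 2: the actual call
+ */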
+int Mir2Lir::CallHelperSetup(int helper_offset)
+{
+  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
+}
+
+/* NOTE: if r_tgt is a temp, it will be freed following use */
+LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc)
+{
+  LIR* call_inst;
+  if (cu_->instruction_set == kX86) {
+    call_inst = OpThreadMem(kOpBlx, helper_offset);
+  } else {
+    call_inst = OpReg(kOpBlx, r_tgt);
+    FreeTemp(r_tgt);
+  }
+  if (safepoint_pc) {
+    MarkSafepointPC(call_inst);
+  }
+  return call_inst;
+}
+
+void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  OpRegCopy(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  if (arg0.wide == 0) {
+    LoadValueDirectFixed(arg0, TargetReg(kArg0));
+  } else {
+    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
+  }
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadConstant(TargetReg(kArg0), arg0);
+  LoadConstant(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+                                              RegLocation arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  if (arg1.wide == 0) {
+    LoadValueDirectFixed(arg1, TargetReg(kArg1));
+  } else {
+    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
+  }
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
+                                              bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadValueDirectFixed(arg0, TargetReg(kArg0));
+  LoadConstant(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  OpRegCopy(TargetReg(kArg1), arg1);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
+                             bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  OpRegCopy(TargetReg(kArg0), arg0);
+  LoadConstant(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadCurrMethodDirect(TargetReg(kArg1));
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
+                                                      RegLocation arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  if (arg0.wide == 0) {
+    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+    if (arg1.wide == 0) {
+      if (cu_->instruction_set == kMips) {
+        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
+      } else {
+        LoadValueDirectFixed(arg1, TargetReg(kArg1));
+      }
+    } else {
+      if (cu_->instruction_set == kMips) {
+        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
+      } else {
+        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
+      }
+    }
+  } else {
+    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+    if (arg1.wide == 0) {
+      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+    } else {
+      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
+    }
+  }
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
+  OpRegCopy(TargetReg(kArg0), arg0);
+  OpRegCopy(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+                                         int arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
+  OpRegCopy(TargetReg(kArg0), arg0);
+  OpRegCopy(TargetReg(kArg1), arg1);
+  LoadConstant(TargetReg(kArg2), arg2);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
+                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadValueDirectFixed(arg2, TargetReg(kArg2));
+  LoadCurrMethodDirect(TargetReg(kArg1));
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
+                                            int arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadCurrMethodDirect(TargetReg(kArg1));
+  LoadConstant(TargetReg(kArg2), arg2);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+                                                         int arg0, RegLocation arg1,
+                                                         RegLocation arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadValueDirectFixed(arg1, TargetReg(kArg1));
+  if (arg2.wide == 0) {
+    LoadValueDirectFixed(arg2, TargetReg(kArg2));
+  } else {
+    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
+  }
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
+/*
+ * If there are any ins passed in registers that have not been promoted
+ * to a callee-save register, flush them to the frame.  Perform initial
+ * assignment of promoted arguments.
+ *
+ * ArgLocs is an array of location records describing the incoming arguments
+ * with one location record per word of argument.
+ */
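+/*
+ * Sketch of the emitted code for three ins where only the second one is
+ * promoted (register/vreg names here are illustrative; the first three ins
+ * arrive in kArg1..kArg3):
+ *   store kArg1 -> [sp + SRegOffset(in0)]    // unpromoted: flush to frame
+ *   copy  kArg2 -> promoted core reg of in1  // promoted: move to its register
+ *   store kArg3 -> [sp + SRegOffset(in2)]
+ */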
+void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method)
+{
+  /*
+   * Dummy up a RegLocation for the incoming Method*
+   * It will attempt to keep kArg0 live (or copy it to home location
+   * if promoted).
+   */
+  RegLocation rl_src = rl_method;
+  rl_src.location = kLocPhysReg;
+  rl_src.low_reg = TargetReg(kArg0);
+  rl_src.home = false;
+  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+  StoreValue(rl_method, rl_src);
+  // If Method* has been promoted, explicitly flush
+  if (rl_method.location == kLocPhysReg) {
+    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
+  }
+
+  if (cu_->num_ins == 0)
+    return;
+  const int num_arg_regs = 3;
+  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
+  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+  /*
+   * Copy incoming arguments to their proper home locations.
+   * NOTE: an older version of dx had an issue in which
+   * it would reuse static method argument registers.
+   * This could result in the same Dalvik virtual register
+   * being promoted to both core and fp regs. To account for this,
+   * we only copy to the corresponding promoted physical register
+   * if it matches the type of the SSA name for the incoming
+   * argument.  It is also possible that long and double arguments
+   * end up half-promoted.  In those cases, we must flush the promoted
+   * half to memory as well.
+   */
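+  /*
+   * Hypothetical example: if only one half of a wide (long/double) in was
+   * promoted, the need_flush check below forces that in back to its frame
+   * slots so the value never ends up split between register and memory.
+   */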
+  for (int i = 0; i < cu_->num_ins; i++) {
+    PromotionMap* v_map = &promotion_map_[start_vreg + i];
+    if (i < num_arg_regs) {
+      // If arriving in register
+      bool need_flush = true;
+      RegLocation* t_loc = &ArgLocs[i];
+      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
+        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
+        need_flush = false;
+      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
+        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
+        need_flush = false;
+      } else {
+        need_flush = true;
+      }
+
+      // For wide args, force flush if only half is promoted
+      if (t_loc->wide) {
+        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
+        need_flush |= (p_map->core_location != v_map->core_location) ||
+            (p_map->fp_location != v_map->fp_location);
+      }
+      if (need_flush) {
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
+                      TargetReg(arg_regs[i]), kWord);
+      }
+    } else {
+      // If arriving in frame & promoted
+      if (v_map->core_location == kLocPhysReg) {
+        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
+                     v_map->core_reg);
+      }
+      if (v_map->fp_location == kLocPhysReg) {
+        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
+                     v_map->FpReg);
+      }
+    }
+  }
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
+ */
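+/*
+ * The Next*CallInsn helpers below behave as small state machines: the invoke
+ * lowering code calls them repeatedly with an increasing state, interleaving
+ * argument setup between calls, and stops once they return -1.  Rough sketch
+ * only; the driving loop lives in the invoke lowering code:
+ *
+ *   int state = 0;
+ *   while (state != -1) {
+ *     // ...marshal the next argument...
+ *     state = next_call_insn(cu, info, state, target_method, ...);
+ *   }
+ */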
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+                          int state, const MethodReference& target_method,
+                          uint32_t unused,
+                          uintptr_t direct_code, uintptr_t direct_method,
+                          InvokeType type)
+{
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+  if (cu->instruction_set != kThumb2) {
+    // Disable sharpening
+    direct_code = 0;
+    direct_method = 0;
+  }
+  if (direct_code != 0 && direct_method != 0) {
+    switch (state) {
+    case 0:  // Get the current Method* [sets kArg0]
+      if (direct_code != static_cast<unsigned int>(-1)) {
+        cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
+      } else {
+        CHECK_EQ(cu->dex_file, target_method.dex_file);
+        LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
+                                               target_method.dex_method_index, 0);
+        if (data_target == NULL) {
+          data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
+          data_target->operands[1] = type;
+        }
+        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
+        cg->AppendLIR(load_pc_rel);
+        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+      }
+      if (direct_method != static_cast<unsigned int>(-1)) {
+        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
+      } else {
+        CHECK_EQ(cu->dex_file, target_method.dex_file);
+        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
+                                               target_method.dex_method_index, 0);
+        if (data_target == NULL) {
+          data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
+          data_target->operands[1] = type;
+        }
+        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
+        cg->AppendLIR(load_pc_rel);
+        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+      }
+      break;
+    default:
+      return -1;
+    }
+  } else {
+    switch (state) {
+    case 0:  // Get the current Method* [sets kArg0]
+      // TUNING: we can save a reg copy if Method* has been promoted.
+      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
+      break;
+    case 1:  // Get method->dex_cache_resolved_methods_
+      cg->LoadWordDisp(cg->TargetReg(kArg0),
+        mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
+      // Set up direct code if known.
+      if (direct_code != 0) {
+        if (direct_code != static_cast<unsigned int>(-1)) {
+          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
+        } else {
+          CHECK_EQ(cu->dex_file, target_method.dex_file);
+          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
+                                                 target_method.dex_method_index, 0);
+          if (data_target == NULL) {
+            data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
+            data_target->operands[1] = type;
+          }
+          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
+          cg->AppendLIR(load_pc_rel);
+          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+        }
+      }
+      break;
+    case 2:  // Grab target method*
+      CHECK_EQ(cu->dex_file, target_method.dex_file);
+      cg->LoadWordDisp(cg->TargetReg(kArg0),
+                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+                           (target_method.dex_method_index * 4),
+                       cg->TargetReg(kArg0));
+      break;
+    case 3:  // Grab the code from the method*
+      if (cu->instruction_set != kX86) {
+        if (direct_code == 0) {
+          cg->LoadWordDisp(cg->TargetReg(kArg0),
+                           mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
+                           cg->TargetReg(kInvokeTgt));
+        }
+        break;
+      }
+      // Intentional fallthrough for x86
+    default:
+      return -1;
+    }
+  }
+  return state + 1;
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in a virtual invoke sequence.
+ * We can use kLr as a temp prior to target address loading
+ * Note also that we'll load the first argument ("this") into
+ * kArg1 here rather than the standard LoadArgRegs.
+ */
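+/*
+ * Rough shape of what states 0-4 below emit for a resolved virtual call
+ * (offsets as used in the code; register roles per the comments above):
+ *   kArg1      = this
+ *   kInvokeTgt = this->klass_
+ *   kInvokeTgt = kInvokeTgt->vtable_
+ *   kArg0      = vtable_[method_idx]
+ *   kInvokeTgt = compiled code entry point of kArg0   // skipped on x86
+ */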
+static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
+                         int state, const MethodReference& target_method,
+                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
+                         InvokeType unused3)
+{
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+  /*
+   * This is the fast path in which the target virtual method is
+   * fully resolved at compile time.
+   */
+  switch (state) {
+    case 0: {  // Get "this" [set kArg1]
+      RegLocation rl_arg = info->args[0];
+      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
+      break;
+    }
+    case 1: // Is "this" null? [use kArg1]
+      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+      // get this->klass_ [use kArg1, set kInvokeTgt]
+      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+                       cg->TargetReg(kInvokeTgt));
+      break;
+    case 2: // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
+      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
+                       cg->TargetReg(kInvokeTgt));
+      break;
+    case 3: // Get target method [use kInvokeTgt, set kArg0]
+      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
+                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
+                       cg->TargetReg(kArg0));
+      break;
+    case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
+      if (cu->instruction_set != kX86) {
+        cg->LoadWordDisp(cg->TargetReg(kArg0),
+                         mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
+                         cg->TargetReg(kInvokeTgt));
+        break;
+      }
+      // Intentional fallthrough for X86
+    default:
+      return -1;
+  }
+  return state + 1;
+}
+
+/*
+ * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
+ * which will locate the target and continue on via a tail call.
+ */
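+/*
+ * Both paths below end with kArg0 holding the interface Method*; on targets
+ * other than x86, kInvokeTgt is also loaded with the trampoline address.
+ */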
+static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
+                                 const MethodReference& target_method,
+                                 uint32_t unused, uintptr_t unused2,
+                                 uintptr_t direct_method, InvokeType unused4)
+{
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+  if (cu->instruction_set != kThumb2) {
+    // Disable sharpening
+    direct_method = 0;
+  }
+  int trampoline = (cu->instruction_set == kX86) ? 0
+      : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+
+  if (direct_method != 0) {
+    switch (state) {
+      case 0:  // Load the trampoline target [sets kInvokeTgt].
+        if (cu->instruction_set != kX86) {
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+        }
+        // Get the interface Method* [sets kArg0]
+        if (direct_method != static_cast<unsigned int>(-1)) {
+          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
+        } else {
+          CHECK_EQ(cu->dex_file, target_method.dex_file);
+          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
+                                                 target_method.dex_method_index, 0);
+          if (data_target == NULL) {
+            data_target = cg->AddWordData(&cg->method_literal_list_,
+                                          target_method.dex_method_index);
+            data_target->operands[1] = kInterface;
+          }
+          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
+          cg->AppendLIR(load_pc_rel);
+          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+        }
+        break;
+      default:
+        return -1;
+    }
+  } else {
+    switch (state) {
+      case 0:
+        // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
+        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
+        // Load the trampoline target [sets kInvokeTgt].
+        if (cu->instruction_set != kX86) {
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+        }
+        break;
+      case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
+        cg->LoadWordDisp(cg->TargetReg(kArg0),
+                         mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+                         cg->TargetReg(kArg0));
+        break;
+      case 2:  // Grab target method* [set/use kArg0]
+        CHECK_EQ(cu->dex_file, target_method.dex_file);
+        cg->LoadWordDisp(cg->TargetReg(kArg0),
+                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+                             (target_method.dex_method_index * 4),
+                         cg->TargetReg(kArg0));
+        break;
+      default:
+        return -1;
+    }
+  }
+  return state + 1;
+}
+
+static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
+                            int state, const MethodReference& target_method,
+                            uint32_t method_idx)
+{
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+  /*
+   * This handles the case in which the base method is not fully
+   * resolved at compile time; we bail to a runtime helper.
+   */
+  if (state == 0) {
+    if (cu->instruction_set != kX86) {
+      // Load trampoline target
+      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+    }
+    // Load kArg0 with method index
+    CHECK_EQ(cu->dex_file, target_method.dex_file);
+    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
+    return 1;
+  }
+  return -1;
+}
+
+static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
+                                int state,
+                                const MethodReference& target_method,
+                                uint32_t method_idx,
+                                uintptr_t unused, uintptr_t unused2,
+                                InvokeType unused3)
+{
+  int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
+}
+
+static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+                                const MethodReference& target_method,
+                                uint32_t method_idx, uintptr_t unused,
+                                uintptr_t unused2, InvokeType unused3)
+{
+  int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
+}
+
+static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+                               const MethodReference& target_method,
+                               uint32_t method_idx, uintptr_t unused,
+                               uintptr_t unused2, InvokeType unused3)
+{
+  int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
+}
+
+static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+                           const MethodReference& target_method,
+                           uint32_t method_idx, uintptr_t unused,
+                           uintptr_t unused2, InvokeType unused3)
+{
+  int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
+}
+
+static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
+                                                CallInfo* info, int state,
+                                                const MethodReference& target_method,
+                                                uint32_t unused,
+                                                uintptr_t unused2, uintptr_t unused3,
+                                                InvokeType unused4)
+{
+  int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
+}
+
+int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
+                         NextCallInsn next_call_insn,
+                         const MethodReference& target_method,
+                         uint32_t vtable_idx, uintptr_t direct_code,
+                         uintptr_t direct_method, InvokeType type, bool skip_this)
+{
+  int last_arg_reg = TargetReg(kArg3);
+  int next_reg = TargetReg(kArg1);
+  int next_arg = 0;
+  if (skip_this) {
+    next_reg++;
+    next_arg++;
+  }
+  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
+    RegLocation rl_arg = info->args[next_arg++];
+    rl_arg = UpdateRawLoc(rl_arg);
+    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
+      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
+      next_reg++;
+      next_arg++;
+    } else {
+      if (rl_arg.wide) {
+        rl_arg.wide = false;
+        rl_arg.is_const = false;
+      }
+      LoadValueDirectFixed(rl_arg, next_reg);
+    }
+    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                                direct_code, direct_method, type);
+  }
+  return call_state;
+}
+
+/*
+ * Load up to 5 arguments, the first three of which will be in
+ * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
+ * and as part of the load sequence, it must be replaced with
+ * the target method pointer.  Note, this may also be called
+ * for "range" variants if the number of arguments is 5 or fewer.
+ */
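+/*
+ * Outgoing area sketch for the 4-5 argument case handled below (offsets are
+ * relative to kSp; slot 0 holds the Method*):
+ *   [kSp + 0]   Method*
+ *   [kSp + 16]  arg word 3
+ *   [kSp + 20]  arg word 4
+ * Arg words 0-2 travel in kArg1..kArg3 via LoadArgRegs().
+ */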
+int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
+                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
+                                  const MethodReference& target_method,
+                                  uint32_t vtable_idx, uintptr_t direct_code,
+                                  uintptr_t direct_method, InvokeType type, bool skip_this)
+{
+  RegLocation rl_arg;
+
+  /* If no arguments, just return */
+  if (info->num_arg_words == 0)
+    return call_state;
+
+  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                              direct_code, direct_method, type);
+
+  DCHECK_LE(info->num_arg_words, 5);
+  if (info->num_arg_words > 3) {
+    int32_t next_use = 3;
+    // Detect special case of a wide arg spanning arg3/arg4
+    RegLocation rl_use0 = info->args[0];
+    RegLocation rl_use1 = info->args[1];
+    RegLocation rl_use2 = info->args[2];
+    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
+        rl_use2.wide) {
+      int reg = -1;
+      // Wide arg spans arg3/arg4; we need the 2nd half of uses[2].
+      rl_arg = UpdateLocWide(rl_use2);
+      if (rl_arg.location == kLocPhysReg) {
+        reg = rl_arg.high_reg;
+      } else {
+        // kArg2 & kArg3 can safely be used here
+        reg = TargetReg(kArg3);
+        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
+        call_state = next_call_insn(cu_, info, call_state, target_method,
+                                    vtable_idx, direct_code, direct_method, type);
+      }
+      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
+      StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
+      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                                  direct_code, direct_method, type);
+      next_use++;
+    }
+    // Loop through the rest
+    while (next_use < info->num_arg_words) {
+      int low_reg;
+      int high_reg = -1;
+      rl_arg = info->args[next_use];
+      rl_arg = UpdateRawLoc(rl_arg);
+      if (rl_arg.location == kLocPhysReg) {
+        low_reg = rl_arg.low_reg;
+        high_reg = rl_arg.high_reg;
+      } else {
+        low_reg = TargetReg(kArg2);
+        if (rl_arg.wide) {
+          high_reg = TargetReg(kArg3);
+          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
+        } else {
+          LoadValueDirectFixed(rl_arg, low_reg);
+        }
+        call_state = next_call_insn(cu_, info, call_state, target_method,
+                                    vtable_idx, direct_code, direct_method, type);
+      }
+      int outs_offset = (next_use + 1) * 4;
+      if (rl_arg.wide) {
+        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
+        next_use += 2;
+      } else {
+        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
+        next_use++;
+      }
+      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                               direct_code, direct_method, type);
+    }
+  }
+
+  call_state = LoadArgRegs(info, call_state, next_call_insn,
+                           target_method, vtable_idx, direct_code, direct_method,
+                           type, skip_this);
+
+  if (pcrLabel) {
+    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+  }
+  return call_state;
+}
+
+/*
+ * May have 0+ arguments (also used for jumbo).  Note that
+ * source virtual registers may be in physical registers, so may
+ * need to be flushed to home location before copying.  This
+ * applies to arg3 and above (see below).
+ *
+ * Two general strategies:
+ *    If < 20 arguments
+ *       Pass args 3-18 using vldm/vstm block copy
+ *       Pass arg0, arg1 & arg2 in kArg1-kArg3
+ *    If 20+ arguments
+ *       Pass args arg19+ using memcpy block copy
+ *       Pass arg0, arg1 & arg2 in kArg1-kArg3
+ *
+ */
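+/*
+ * Example (sketch): a 10-argument range invoke on Thumb2 block-copies arg
+ * words 3..9 from their frame homes to the outs area with one vldm/vstm
+ * pair, while arg words 0-2 still go through kArg1..kArg3 via LoadArgRegs().
+ */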
+int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
+                                LIR** pcrLabel, NextCallInsn next_call_insn,
+                                const MethodReference& target_method,
+                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
+                                InvokeType type, bool skip_this)
+{
+
+  // If we can treat it as non-range (Jumbo ops will use range form)
+  if (info->num_arg_words <= 5)
+    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
+                                next_call_insn, target_method, vtable_idx,
+                                direct_code, direct_method, type, skip_this);
+  /*
+   * First load the non-register arguments.  Both forms expect all
+   * of the source arguments to be in their home frame location, so
+   * scan the s_reg names and flush any that have been promoted to
+   * physical registers back to their frame backing storage.
+   */
+  // Scan the rest of the args - if in phys_reg flush to memory
+  for (int next_arg = 0; next_arg < info->num_arg_words;) {
+    RegLocation loc = info->args[next_arg];
+    if (loc.wide) {
+      loc = UpdateLocWide(loc);
+      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
+        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
+                          loc.low_reg, loc.high_reg);
+      }
+      next_arg += 2;
+    } else {
+      loc = UpdateLoc(loc);
+      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
+                      loc.low_reg, kWord);
+      }
+      next_arg++;
+    }
+  }
+
+  int start_offset = SRegOffset(info->args[3].s_reg_low);
+  int outs_offset = 4 /* Method* */ + (3 * 4);
+  if (cu_->instruction_set != kThumb2) {
+    // Generate memcpy
+    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+    CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
+  } else {
+    if (info->num_arg_words >= 20) {
+      // Generate memcpy
+      OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+      OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+      CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+                                 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
+    } else {
+      // Use vldm/vstm pair using kArg3 as a temp
+      int regs_left = std::min(info->num_arg_words - 3, 16);
+      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                               direct_code, direct_method, type);
+      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
+      LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
+      // TUNING: loosen barrier
+      ld->def_mask = ENCODE_ALL;
+      SetMemRefType(ld, true /* is_load */, kDalvikReg);
+      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                               direct_code, direct_method, type);
+      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
+      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                               direct_code, direct_method, type);
+      LIR* st = OpVstm(TargetReg(kArg3), regs_left);
+      SetMemRefType(st, false /* is_load */, kDalvikReg);
+      st->def_mask = ENCODE_ALL;
+      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                               direct_code, direct_method, type);
+    }
+  }
+
+  call_state = LoadArgRegs(info, call_state, next_call_insn,
+                           target_method, vtable_idx, direct_code, direct_method,
+                           type, skip_this);
+
+  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+                           direct_code, direct_method, type);
+  if (pcrLabel) {
+    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+  }
+  return call_state;
+}
+
+RegLocation Mir2Lir::InlineTarget(CallInfo* info)
+{
+  RegLocation res;
+  if (info->result.location == kLocInvalid) {
+    res = GetReturn(false);
+  } else {
+    res = info->result;
+  }
+  return res;
+}
+
+RegLocation Mir2Lir::InlineTargetWide(CallInfo* info)
+{
+  RegLocation res;
+  if (info->result.location == kLocInvalid) {
+    res = GetReturnWide(false);
+  } else {
+    res = info->result;
+  }
+  return res;
+}
+
+bool Mir2Lir::GenInlinedCharAt(CallInfo* info)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  // Location of reference to data array
+  int value_offset = mirror::String::ValueOffset().Int32Value();
+  // Location of count
+  int count_offset = mirror::String::CountOffset().Int32Value();
+  // Starting offset within data array
+  int offset_offset = mirror::String::OffsetOffset().Int32Value();
+  // Start of char data within array_
+  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
+
+  RegLocation rl_obj = info->args[0];
+  RegLocation rl_idx = info->args[1];
+  rl_obj = LoadValue(rl_obj, kCoreReg);
+  rl_idx = LoadValue(rl_idx, kCoreReg);
+  int reg_max;
+  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
+  LIR* launch_pad = NULL;
+  int reg_off = INVALID_REG;
+  int reg_ptr = INVALID_REG;
+  if (cu_->instruction_set != kX86) {
+    reg_off = AllocTemp();
+    reg_ptr = AllocTemp();
+    if (range_check) {
+      reg_max = AllocTemp();
+      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
+    }
+    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
+    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
+    if (range_check) {
+      // Set up a launch pad to allow retry in case of bounds violation
+      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+      intrinsic_launchpads_.Insert(launch_pad);
+      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
+      FreeTemp(reg_max);
+      OpCondBranch(kCondCs, launch_pad);
+    }
+  } else {
+    if (range_check) {
+      reg_max = AllocTemp();
+      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
+      // Set up a launch pad to allow retry in case of bounds violation
+      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+      intrinsic_launchpads_.Insert(launch_pad);
+      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
+      FreeTemp(reg_max);
+      OpCondBranch(kCondCc, launch_pad);
+    }
+    reg_off = AllocTemp();
+    reg_ptr = AllocTemp();
+    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
+    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
+  }
+  OpRegImm(kOpAdd, reg_ptr, data_offset);
+  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
+  FreeTemp(rl_obj.low_reg);
+  FreeTemp(rl_idx.low_reg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
+  FreeTemp(reg_off);
+  FreeTemp(reg_ptr);
+  StoreValue(rl_dest, rl_result);
+  if (range_check) {
+    launch_pad->operands[2] = 0;  // no resumption
+  }
+  // Record that we've already inlined & null checked
+  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+  return true;
+}
+
+// Generates an inlined String.is_empty or String.length.
+bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  // dst = src.length();
+  RegLocation rl_obj = info->args[0];
+  rl_obj = LoadValue(rl_obj, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+  LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
+  if (is_empty) {
+    // dst = (dst == 0);
+    if (cu_->instruction_set == kThumb2) {
+      int t_reg = AllocTemp();
+      OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
+      OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
+    } else {
+      DCHECK_EQ(cu_->instruction_set, kX86);
+      OpRegImm(kOpSub, rl_result.low_reg, 1);
+      OpRegImm(kOpLsr, rl_result.low_reg, 31);
+    }
+  }
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+bool Mir2Lir::GenInlinedAbsInt(CallInfo* info)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  RegLocation rl_src = info->args[0];
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int sign_reg = AllocTemp();
+  // abs(x): y = x >> 31 (all ones if x is negative); abs = (x + y) ^ y.
+  OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+  OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+bool Mir2Lir::GenInlinedAbsLong(CallInfo* info)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  if (cu_->instruction_set == kThumb2) {
+    RegLocation rl_src = info->args[0];
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    RegLocation rl_dest = InlineTargetWide(info);
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    int sign_reg = AllocTemp();
+    // abs(x): y = sign mask of x (high word >> 31); abs = (x + y) ^ y across both halves.
+    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
+    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+    StoreValueWide(rl_dest, rl_result);
+    return true;
+  } else {
+    DCHECK_EQ(cu_->instruction_set, kX86);
+    // Reuse source registers to avoid running out of temps
+    RegLocation rl_src = info->args[0];
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    RegLocation rl_dest = InlineTargetWide(info);
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
+    FreeTemp(rl_src.low_reg);
+    FreeTemp(rl_src.high_reg);
+    int sign_reg = AllocTemp();
+    // abs(x): y = sign mask of x (high word >> 31); abs = (x + y) ^ y across both halves.
+    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
+    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
+    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+    StoreValueWide(rl_dest, rl_result);
+    return true;
+  }
+}
+
+bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTarget(info);
+  StoreValue(rl_dest, rl_src);
+  return true;
+}
+
+bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  StoreValueWide(rl_dest, rl_src);
+  return true;
+}
+
+/*
+ * Fast String.indexOf(I) & (II).  Tests for the simple case of char <= 0xffff;
+ * otherwise bails to the standard library code.
+ */
+bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  ClobberCalleeSave();
+  LockCallTemps();  // Using fixed registers
+  int reg_ptr = TargetReg(kArg0);
+  int reg_char = TargetReg(kArg1);
+  int reg_start = TargetReg(kArg2);
+
+  RegLocation rl_obj = info->args[0];
+  RegLocation rl_char = info->args[1];
+  RegLocation rl_start = info->args[2];
+  LoadValueDirectFixed(rl_obj, reg_ptr);
+  LoadValueDirectFixed(rl_char, reg_char);
+  if (zero_based) {
+    LoadConstant(reg_start, 0);
+  } else {
+    LoadValueDirectFixed(rl_start, reg_start);
+  }
+  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
+  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+  intrinsic_launchpads_.Insert(launch_pad);
+  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
+  // NOTE: not a safepoint
+  if (cu_->instruction_set != kX86) {
+    OpReg(kOpBlx, r_tgt);
+  } else {
+    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+  }
+  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
+  launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
+  // Record that we've already inlined & null checked
+  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+  RegLocation rl_return = GetReturn(false);
+  RegLocation rl_dest = InlineTarget(info);
+  StoreValue(rl_dest, rl_return);
+  return true;
+}
+
+/* Fast String.compareTo(Ljava/lang/String;)I. */
+bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info)
+{
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  ClobberCalleeSave();
+  LockCallTemps();  // Using fixed registers
+  int reg_this = TargetReg(kArg0);
+  int reg_cmp = TargetReg(kArg1);
+
+  RegLocation rl_this = info->args[0];
+  RegLocation rl_cmp = info->args[1];
+  LoadValueDirectFixed(rl_this, reg_this);
+  LoadValueDirectFixed(rl_cmp, reg_cmp);
+  int r_tgt = (cu_->instruction_set != kX86) ?
+      LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
+  // TUNING: check if rl_cmp.s_reg_low is already null checked
+  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+  intrinsic_launchpads_.Insert(launch_pad);
+  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
+  // NOTE: not a safepoint
+  if (cu_->instruction_set != kX86) {
+    OpReg(kOpBlx, r_tgt);
+  } else {
+    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+  }
+  launch_pad->operands[2] = 0;  // No return possible
+  // Record that we've already inlined & null checked
+  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+  RegLocation rl_return = GetReturn(false);
+  RegLocation rl_dest = InlineTarget(info);
+  StoreValue(rl_dest, rl_return);
+  return true;
+}
+
+bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int offset = Thread::PeerOffset().Int32Value();
+  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
+    LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
+  } else {
+    CHECK(cu_->instruction_set == kX86);
+    static_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
+  }
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
+                                  bool is_long, bool is_volatile) {
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  // Unused - RegLocation rl_src_unsafe = info->args[0];
+  RegLocation rl_src_obj = info->args[1];  // Object
+  RegLocation rl_src_offset = info->args[2];  // long low
+  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
+  RegLocation rl_dest = InlineTarget(info);  // result reg
+  if (is_volatile) {
+    GenMemBarrier(kLoadLoad);
+  }
+  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
+  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (is_long) {
+    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
+    StoreValue(rl_dest, rl_result);
+  }
+  return true;
+}
+
+bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
+                                  bool is_object, bool is_volatile, bool is_ordered) {
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  if (cu_->instruction_set == kX86 && is_object) {
+    // TODO: fix X86, it exhausts registers for card marking.
+    return false;
+  }
+  // Unused - RegLocation rl_src_unsafe = info->args[0];
+  RegLocation rl_src_obj = info->args[1];  // Object
+  RegLocation rl_src_offset = info->args[2];  // long low
+  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
+  RegLocation rl_src_value = info->args[4];  // value to store
+  if (is_volatile || is_ordered) {
+    GenMemBarrier(kStoreStore);
+  }
+  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
+  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+  RegLocation rl_value;
+  if (is_long) {
+    rl_value = LoadValueWide(rl_src_value, kCoreReg);
+    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+  } else {
+    rl_value = LoadValue(rl_src_value, kCoreReg);
+    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
+  }
+  if (is_volatile) {
+    GenMemBarrier(kStoreLoad);
+  }
+  if (is_object) {
+    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
+  }
+  return true;
+}
+
+bool Mir2Lir::GenIntrinsic(CallInfo* info)
+{
+  if (info->opt_flags & MIR_INLINED) {
+    return false;
+  }
+  /*
+   * TODO: move these to a target-specific structured constant array
+   * and use a generic match function.  The list of intrinsics may be
+   * slightly different depending on target.
+   * TODO: Fold this into a matching function that runs during
+   * basic block building.  This should be part of the action for
+   * small method inlining and recognition of the special object init
+   * method.  By doing this during basic block construction, we can also
+   * take advantage of/generate new useful dataflow info.
+   */
+  StringPiece tgt_methods_declaring_class(
+      cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
+  if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
+      return GenInlinedDoubleCvt(info);
+    }
+    if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
+      return GenInlinedDoubleCvt(info);
+    }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
+      return GenInlinedFloatCvt(info);
+    }
+    if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
+      return GenInlinedFloatCvt(info);
+    }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
+             tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "int java.lang.Math.abs(int)" ||
+        tgt_method == "int java.lang.StrictMath.abs(int)") {
+      return GenInlinedAbsInt(info);
+    }
+    if (tgt_method == "long java.lang.Math.abs(long)" ||
+        tgt_method == "long java.lang.StrictMath.abs(long)") {
+      return GenInlinedAbsLong(info);
+    }
+    if (tgt_method == "int java.lang.Math.max(int, int)" ||
+        tgt_method == "int java.lang.StrictMath.max(int, int)") {
+      return GenInlinedMinMaxInt(info, false /* is_min */);
+    }
+    if (tgt_method == "int java.lang.Math.min(int, int)" ||
+        tgt_method == "int java.lang.StrictMath.min(int, int)") {
+      return GenInlinedMinMaxInt(info, true /* is_min */);
+    }
+    if (tgt_method == "double java.lang.Math.sqrt(double)" ||
+        tgt_method == "double java.lang.StrictMath.sqrt(double)") {
+      return GenInlinedSqrt(info);
+    }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "char java.lang.String.charAt(int)") {
+      return GenInlinedCharAt(info);
+    }
+    if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
+      return GenInlinedStringCompareTo(info);
+    }
+    if (tgt_method == "boolean java.lang.String.is_empty()") {
+      return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */);
+    }
+    if (tgt_method == "int java.lang.String.index_of(int, int)") {
+      return GenInlinedIndexOf(info, false /* base 0 */);
+    }
+    if (tgt_method == "int java.lang.String.index_of(int)") {
+      return GenInlinedIndexOf(info, true /* base 0 */);
+    }
+    if (tgt_method == "int java.lang.String.length()") {
+      return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
+    }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
+      return GenInlinedCurrentThread(info);
+    }
+  } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
+      return GenInlinedCas32(info, false);
+    }
+    if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
+      return GenInlinedCas32(info, true);
+    }
+    if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
+      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
+    }
+    if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
+      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
+      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
+                                 false /* is_volatile */, false /* is_ordered */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
+      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
+                                 true /* is_volatile */, false /* is_ordered */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
+      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
+                                 false /* is_volatile */, true /* is_ordered */);
+    }
+    if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
+      return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */);
+    }
+    if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
+      return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
+      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
+                                 false /* is_volatile */, false /* is_ordered */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
+      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
+                                 true /* is_volatile */, false /* is_ordered */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
+      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
+                                 false /* is_volatile */, true /* is_ordered */);
+    }
+    if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
+      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
+    }
+    if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
+      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
+      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
+                                 false /* is_volatile */, false /* is_ordered */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
+      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
+                                 true /* is_volatile */, false /* is_ordered */);
+    }
+    if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
+      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
+                                 false /* is_volatile */, true /* is_ordered */);
+    }
+  }
+  return false;
+}
+
+void Mir2Lir::GenInvoke(CallInfo* info)
+{
+  if (GenIntrinsic(info)) {
+    return;
+  }
+  InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
+  int call_state = 0;
+  LIR* null_ck;
+  LIR** p_null_ck = NULL;
+  NextCallInsn next_call_insn;
+  FlushAllRegs();  /* Everything to home location */
+  // Explicit register usage
+  LockCallTemps();
+
+  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
+  MethodReference target_method(cUnit->GetDexFile(), info->index);
+  int vtable_idx;
+  uintptr_t direct_code;
+  uintptr_t direct_method;
+  bool skip_this;
+  bool fast_path =
+      cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
+                                              current_dalvik_offset_,
+                                              info->type, target_method,
+                                              vtable_idx,
+                                              direct_code, direct_method,
+                                              true) && !SLOW_INVOKE_PATH;
+  if (info->type == kInterface) {
+    if (fast_path) {
+      p_null_ck = &null_ck;
+    }
+    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
+    skip_this = false;
+  } else if (info->type == kDirect) {
+    if (fast_path) {
+      p_null_ck = &null_ck;
+    }
+    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
+    skip_this = false;
+  } else if (info->type == kStatic) {
+    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
+    skip_this = false;
+  } else if (info->type == kSuper) {
+    DCHECK(!fast_path);  // Fast path is a direct call.
+    next_call_insn = NextSuperCallInsnSP;
+    skip_this = false;
+  } else {
+    DCHECK_EQ(info->type, kVirtual);
+    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
+    skip_this = fast_path;
+  }
+  if (!info->is_range) {
+    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
+                                      next_call_insn, target_method,
+                                      vtable_idx, direct_code, direct_method,
+                                      original_type, skip_this);
+  } else {
+    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
+                                    next_call_insn, target_method, vtable_idx,
+                                    direct_code, direct_method, original_type,
+                                    skip_this);
+  }
+  // Finish up any of the call sequence not interleaved in arg loading
+  while (call_state >= 0) {
+    call_state = next_call_insn(cu_, info, call_state, target_method,
+                                vtable_idx, direct_code, direct_method,
+                                original_type);
+  }
+  LIR* call_inst;
+  if (cu_->instruction_set != kX86) {
+    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
+  } else {
+    if (fast_path && info->type != kInterface) {
+      call_inst = OpMem(kOpBlx, TargetReg(kArg0),
+                        mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
+    } else {
+      int trampoline = 0;
+      switch (info->type) {
+      case kInterface:
+        trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+            : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+        break;
+      case kDirect:
+        trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+        break;
+      case kStatic:
+        trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+        break;
+      case kSuper:
+        trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+        break;
+      case kVirtual:
+        trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+        break;
+      default:
+        LOG(FATAL) << "Unexpected invoke type";
+      }
+      call_inst = OpThreadMem(kOpBlx, trampoline);
+    }
+  }
+  MarkSafepointPC(call_inst);
+
+  ClobberCalleeSave();
+  if (info->result.location != kLocInvalid) {
+    // We have a following MOVE_RESULT - do it now.
+    if (info->result.wide) {
+      RegLocation ret_loc = GetReturnWide(info->result.fp);
+      StoreValueWide(info->result, ret_loc);
+    } else {
+      RegLocation ret_loc = GetReturn(info->result.fp);
+      StoreValue(info->result, ret_loc);
+    }
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
new file mode 100644
index 0000000..6a25c1d
--- /dev/null
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_ir.h"
+#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "invoke_type.h"
+
+namespace art {
+
+/* This file contains target-independent codegen and support. */
+
+/*
+ * Load an immediate value into a fixed or temp register.  Target
+ * register is clobbered, and marked in_use.
+ */
+LIR* Mir2Lir::LoadConstant(int r_dest, int value)
+{
+  if (IsTemp(r_dest)) {
+    Clobber(r_dest);
+    MarkInUse(r_dest);
+  }
+  return LoadConstantNoClobber(r_dest, value);
+}
+
+/*
+ * Temporary workaround for Issue 7250540.  If we're loading a constant zero into a
+ * promoted floating point register, also copy a zero into the int/ref identity of
+ * that sreg.
+ */
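+/*
+ * Example: for a "const v0, #0" whose v0 is promoted to a float register but is
+ * also used as a reference elsewhere in the method, the zero is additionally
+ * written to v0's core/ref home (promoted core register or frame slot) so that
+ * a later null comparison sees it.
+ */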
+void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg)
+{
+  if (rl_dest.fp) {
+    int pmap_index = SRegToPMap(rl_dest.s_reg_low);
+    if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
+      // Now, determine if this vreg is ever used as a reference.  If not, we're done.
+      bool used_as_reference = false;
+      int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
+      for (int i = 0; !used_as_reference && (i < mir_graph_->GetNumSSARegs()); i++) {
+        if (mir_graph_->SRegToVReg(mir_graph_->reg_location_[i].s_reg_low) == base_vreg) {
+          used_as_reference |= mir_graph_->reg_location_[i].ref;
+        }
+      }
+      if (!used_as_reference) {
+        return;
+      }
+      int temp_reg = zero_reg;
+      if (temp_reg == INVALID_REG) {
+        temp_reg = AllocTemp();
+        LoadConstant(temp_reg, 0);
+      }
+      if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
+        // Promoted - just copy in a zero
+        OpRegCopy(promotion_map_[pmap_index].core_reg, temp_reg);
+      } else {
+        // Lives in the frame, need to store.
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, kWord);
+      }
+      if (zero_reg == INVALID_REG) {
+        FreeTemp(temp_reg);
+      }
+    }
+  }
+}
+
+/* Load a word at base + displacement.  Displacement must be a word multiple */
+LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest)
+{
+  return LoadBaseDisp(rBase, displacement, r_dest, kWord,
+                      INVALID_SREG);
+}
+
+LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src)
+{
+  return StoreBaseDisp(rBase, displacement, r_src, kWord);
+}
+
+/*
+ * Load a Dalvik register into a physical register.  Take care when
+ * using this routine, as it doesn't perform any bookkeeping regarding
+ * register liveness.  That is the responsibility of the caller.
+ */
+void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest)
+{
+  rl_src = UpdateLoc(rl_src);
+  if (rl_src.location == kLocPhysReg) {
+    OpRegCopy(r_dest, rl_src.low_reg);
+  } else if (IsInexpensiveConstant(rl_src)) {
+    LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
+  } else {
+    DCHECK((rl_src.location == kLocDalvikFrame) ||
+           (rl_src.location == kLocCompilerTemp));
+    LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
+  }
+}
+
+/*
+ * Similar to LoadValueDirect, but clobbers and allocates the target
+ * register.  Should be used when loading to a fixed register (for example,
+ * loading arguments to an out-of-line call).
+ */
+void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest)
+{
+  Clobber(r_dest);
+  MarkInUse(r_dest);
+  LoadValueDirect(rl_src, r_dest);
+}
+
+/*
+ * Load a Dalvik register pair into a pair of physical registers.  Take care when
+ * using this routine, as it doesn't perform any bookkeeping regarding
+ * register liveness.  That is the responsibility of the caller.
+ */
+void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo,
+             int reg_hi)
+{
+  rl_src = UpdateLocWide(rl_src);
+  if (rl_src.location == kLocPhysReg) {
+    OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
+  } else if (IsInexpensiveConstant(rl_src)) {
+    LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src));
+  } else {
+    DCHECK((rl_src.location == kLocDalvikFrame) ||
+           (rl_src.location == kLocCompilerTemp));
+    LoadBaseDispWide(TargetReg(kSp), SRegOffset(rl_src.s_reg_low),
+                     reg_lo, reg_hi, INVALID_SREG);
+  }
+}
+
+/*
+ * Similar to LoadValueDirectWide, but clobbers and allocates the target
+ * registers.  Should be used when loading to fixed registers (for example,
+ * loading arguments to an out-of-line call).
+ */
+void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo,
+                                       int reg_hi)
+{
+  Clobber(reg_lo);
+  Clobber(reg_hi);
+  MarkInUse(reg_lo);
+  MarkInUse(reg_hi);
+  LoadValueDirectWide(rl_src, reg_lo, reg_hi);
+}
+
+RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind)
+{
+  rl_src = EvalLoc(rl_src, op_kind, false);
+  if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
+    LoadValueDirect(rl_src, rl_src.low_reg);
+    rl_src.location = kLocPhysReg;
+    MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+  }
+  return rl_src;
+}
+
+void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src)
+{
+  /*
+   * Sanity checking - should never try to store to the same
+   * ssa name during the compilation of a single instruction
+   * without an intervening ClobberSReg().
+   */
+  if (kIsDebugBuild) {
+    DCHECK((live_sreg_ == INVALID_SREG) ||
+           (rl_dest.s_reg_low != live_sreg_));
+    live_sreg_ = rl_dest.s_reg_low;
+  }
+  LIR* def_start;
+  LIR* def_end;
+  DCHECK(!rl_dest.wide);
+  DCHECK(!rl_src.wide);
+  rl_src = UpdateLoc(rl_src);
+  rl_dest = UpdateLoc(rl_dest);
+  if (rl_src.location == kLocPhysReg) {
+    if (IsLive(rl_src.low_reg) ||
+      IsPromoted(rl_src.low_reg) ||
+      (rl_dest.location == kLocPhysReg)) {
+      // Src is live/promoted or Dest has assigned reg.
+      rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+      OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
+    } else {
+      // Just re-assign the registers.  Dest gets Src's regs
+      rl_dest.low_reg = rl_src.low_reg;
+      Clobber(rl_src.low_reg);
+    }
+  } else {
+    // Load Src either into promoted Dest or temps allocated for Dest
+    rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+    LoadValueDirect(rl_src, rl_dest.low_reg);
+  }
+
+  // Dest is now live and dirty (until/if we flush it to home location)
+  MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+  MarkDirty(rl_dest);
+
+
+  ResetDefLoc(rl_dest);
+  if (IsDirty(rl_dest.low_reg) &&
+      oat_live_out(rl_dest.s_reg_low)) {
+    def_start = last_lir_insn_;
+    StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
+                  rl_dest.low_reg, kWord);
+    MarkClean(rl_dest);
+    def_end = last_lir_insn_;
+    if (!rl_dest.ref) {
+      // Exclude references from store elimination
+      MarkDef(rl_dest, def_start, def_end);
+    }
+  }
+}
+
+RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind)
+{
+  DCHECK(rl_src.wide);
+  rl_src = EvalLoc(rl_src, op_kind, false);
+  if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
+    LoadValueDirectWide(rl_src, rl_src.low_reg, rl_src.high_reg);
+    rl_src.location = kLocPhysReg;
+    MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+    MarkLive(rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
+  }
+  return rl_src;
+}
+
+void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src)
+{
+  /*
+   * Sanity checking - should never try to store to the same
+   * ssa name during the compilation of a single instruction
+   * without an intervening ClobberSReg().
+   */
+  if (kIsDebugBuild) {
+    DCHECK((live_sreg_ == INVALID_SREG) ||
+           (rl_dest.s_reg_low != live_sreg_));
+    live_sreg_ = rl_dest.s_reg_low;
+  }
+  LIR* def_start;
+  LIR* def_end;
+  DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_src.wide);
+  if (rl_src.location == kLocPhysReg) {
+    if (IsLive(rl_src.low_reg) ||
+        IsLive(rl_src.high_reg) ||
+        IsPromoted(rl_src.low_reg) ||
+        IsPromoted(rl_src.high_reg) ||
+        (rl_dest.location == kLocPhysReg)) {
+      // Src is live or promoted or Dest has assigned reg.
+      rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+      OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg,
+                    rl_src.low_reg, rl_src.high_reg);
+    } else {
+      // Just re-assign the registers.  Dest gets Src's regs
+      rl_dest.low_reg = rl_src.low_reg;
+      rl_dest.high_reg = rl_src.high_reg;
+      Clobber(rl_src.low_reg);
+      Clobber(rl_src.high_reg);
+    }
+  } else {
+    // Load Src either into promoted Dest or temps allocated for Dest
+    rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+    LoadValueDirectWide(rl_src, rl_dest.low_reg, rl_dest.high_reg);
+  }
+
+  // Dest is now live and dirty (until/if we flush it to home location)
+  MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+  MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+  MarkDirty(rl_dest);
+  MarkPair(rl_dest.low_reg, rl_dest.high_reg);
+
+
+  ResetDefLocWide(rl_dest);
+  if ((IsDirty(rl_dest.low_reg) ||
+      IsDirty(rl_dest.high_reg)) &&
+      (oat_live_out(rl_dest.s_reg_low) ||
+      oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
+    def_start = last_lir_insn_;
+    DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
+              mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
+    StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
+                      rl_dest.low_reg, rl_dest.high_reg);
+    MarkClean(rl_dest);
+    def_end = last_lir_insn_;
+    MarkDefWide(rl_dest, def_start, def_end);
+  }
+}
+
+/* Utilities to load the current Method* */
+void Mir2Lir::LoadCurrMethodDirect(int r_tgt)
+{
+  LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt);
+}
+
+RegLocation Mir2Lir::LoadCurrMethod()
+{
+  return LoadValue(mir_graph_->GetMethodLoc(), kCoreReg);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
new file mode 100644
index 0000000..ac654d8
--- /dev/null
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+#define DEBUG_OPT(X)
+
+/* Check RAW, WAR, and WAW dependencies on the register operands */
+#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
+                                        ((use | def) & check->def_mask))
+
+/* Scheduler heuristics */
+#define MAX_HOIST_DISTANCE 20
+#define LDLD_DISTANCE 4
+#define LD_LATENCY 2
+
+static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2)
+{
+  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
+  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
+  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
+  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);
+
+  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
+}
+
+/* Convert a more expensive instruction (i.e. a load) into a move */
+void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src)
+{
+  /* Insert a move to replace the load */
+  LIR* move_lir;
+  move_lir = OpRegCopyNoInsert(dest, src);
+  /*
+   * Insert the converted instruction after the original since the
+   * optimization is scanning in top-down order and the new instruction
+   * will need to be re-checked (e.g. the new dest clobbers the src used in
+   * this_lir).
+   */
+  InsertLIRAfter(orig_lir, move_lir);
+}
+
+/*
+ * Perform a top-down walk, starting from the second-to-last instruction in the
+ * superblock, to eliminate redundant loads and stores.
+ *
+ * An earlier load can eliminate a later load iff
+ *   1) They are must-aliases
+ *   2) The native register is not clobbered in between
+ *   3) The memory location is not written to in between
+ *
+ * An earlier store can eliminate a later load iff
+ *   1) They are must-aliases
+ *   2) The native register is not clobbered in between
+ *   3) The memory location is not written to in between
+ *
+ * A later store can be eliminated by an earlier store iff
+ *   1) They are must-aliases
+ *   2) The memory location is not written to in between
+ */
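+/*
+ * Illustrative example (pseudo-LIR, not actual emitted code):
+ *     load  rA, [vX]    <- this_lir reads Dalvik register vX
+ *     ...               <- rA not clobbered, vX not written to
+ *     load  rB, [vX]    <- must-alias load (check_lir)
+ * The second load is nop'd; if rB != rA it is first replaced by a
+ * "move rB, rA" via ConvertMemOpIntoMove().
+ */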
+void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir)
+{
+  LIR* this_lir;
+
+  if (head_lir == tail_lir) return;
+
+  for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
+
+    if (is_pseudo_opcode(this_lir->opcode)) continue;
+
+    int sink_distance = 0;
+
+    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
+
+    /* Skip non-interesting instructions */
+    if ((this_lir->flags.is_nop == true) ||
+        (target_flags & IS_BRANCH) ||
+        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||  // Skip wide loads.
+        ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
+         (REG_USE0 | REG_USE1 | REG_USE2)) ||  // Skip wide stores.
+        !(target_flags & (IS_LOAD | IS_STORE))) {
+      continue;
+    }
+
+    int native_reg_id;
+    if (cu_->instruction_set == kX86) {
+      // If x86, location differs depending on whether memory/reg operation.
+      native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
+          : this_lir->operands[0];
+    } else {
+      native_reg_id = this_lir->operands[0];
+    }
+    bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+    LIR* check_lir;
+    /* Use the mem mask to determine the rough memory location */
+    uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
+
+    /*
+     * Currently only eliminate redundant ld/st for constant and Dalvik
+     * register accesses.
+     */
+    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+
+    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+    uint64_t stop_use_reg_mask;
+    if (cu_->instruction_set == kX86) {
+      stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
+    } else {
+      /*
+       * Add pc to the resource mask to prevent this instruction
+       * from sinking past branch instructions. Also take out the memory
+       * region bits since stop_mask is used to check data/control
+       * dependencies.
+       */
+      stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
+    }
+
+    for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
+
+      /*
+       * Skip already dead instructions (whose dataflow information is
+       * outdated and misleading).
+       */
+      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue;
+
+      uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
+      uint64_t alias_condition = this_mem_mask & check_mem_mask;
+      bool stop_here = false;
+
+      /*
+       * Potential aliases seen - check the alias relations
+       */
+      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
+      // TUNING: Support instructions with multiple register targets.
+      if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
+        stop_here = true;
+      } else if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+        bool is_check_lir_load = check_flags & IS_LOAD;
+        if  (alias_condition == ENCODE_LITERAL) {
+          /*
+           * Should only see literal loads in the instruction
+           * stream.
+           */
+          DCHECK(!(check_flags & IS_STORE));
+          /* Same value && same register type */
+          if (check_lir->alias_info == this_lir->alias_info &&
+              SameRegType(check_lir->operands[0], native_reg_id)) {
+            /*
+             * Different destination register - insert
+             * a move
+             */
+            if (check_lir->operands[0] != native_reg_id) {
+              ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
+            }
+            check_lir->flags.is_nop = true;
+          }
+        } else if (alias_condition == ENCODE_DALVIK_REG) {
+          /* Must alias */
+          if (check_lir->alias_info == this_lir->alias_info) {
+            /* Only optimize compatible registers */
+            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
+            if ((is_this_lir_load && is_check_lir_load) ||
+                (!is_this_lir_load && is_check_lir_load)) {
+              /* RAR or RAW */
+              if (reg_compatible) {
+                /*
+                 * Different destination register -
+                 * insert a move
+                 */
+                if (check_lir->operands[0] !=
+                  native_reg_id) {
+                  ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
+                }
+                check_lir->flags.is_nop = true;
+              } else {
+                /*
+                 * Destinations are of different types -
+                 * something complicated is going on, so
+                 * stop looking now.
+                 */
+                stop_here = true;
+              }
+            } else if (is_this_lir_load && !is_check_lir_load) {
+              /* WAR - register value is killed */
+              stop_here = true;
+            } else if (!is_this_lir_load && !is_check_lir_load) {
+              /* WAW - nuke the earlier store */
+              this_lir->flags.is_nop = true;
+              stop_here = true;
+            }
+          /* Partial overlap */
+          } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
+            /*
+             * It is actually ok to continue if check_lir
+             * is a read. But it is hard to make a test
+             * case for this so we just stop here to be
+             * conservative.
+             */
+            stop_here = true;
+          }
+        }
+        /* Memory content may be updated. Stop looking now. */
+        if (stop_here) {
+          break;
+        /* The check_lir has been transformed - check the next one */
+        } else if (check_lir->flags.is_nop) {
+          continue;
+        }
+      }
+
+
+      /*
+       * this and check LIRs have no memory dependency. Now check if
+       * their register operands have any RAW, WAR, and WAW
+       * dependencies. If so, stop looking.
+       */
+      if (stop_here == false) {
+        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
+      }
+
+      if (stop_here == true) {
+        if (cu_->instruction_set == kX86) {
+          // Prevent stores from being sunk between ops that generate ccodes and
+          // ops that use them.
+          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
+          if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
+            check_lir = PREV_LIR(check_lir);
+            sink_distance--;
+          }
+        }
+        DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
+        /* Only sink store instructions */
+        if (sink_distance && !is_this_lir_load) {
+          LIR* new_store_lir =
+              static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+          *new_store_lir = *this_lir;
+          /*
+           * Stop point found - insert *before* the check_lir
+           * since the instruction list is scanned in the
+           * top-down order.
+           */
+          InsertLIRBefore(check_lir, new_store_lir);
+          this_lir->flags.is_nop = true;
+        }
+        break;
+      } else if (!check_lir->flags.is_nop) {
+        sink_distance++;
+      }
+    }
+  }
+}
+
+/*
+ * Perform a bottom-up walk, starting from the second instruction in the
+ * superblock, to try to hoist loads into earlier slots.
+ */
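+/*
+ * Sketch of the idea (pseudo-LIR): given
+ *     ...               <- independent instructions
+ *     load  rA, [vX]
+ *     add   rB, rA, rC  <- immediate use of the loaded value
+ * the load may be copied above the independent instructions (and the original
+ * nop'd) so that some of the load latency is hidden before the use.
+ */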
+void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir)
+{
+  LIR* this_lir, *check_lir;
+  /*
+   * Store the list of independent instructions that can be hoisted past.
+   * Will decide the best place to insert later.
+   */
+  LIR* prev_inst_list[MAX_HOIST_DISTANCE];
+
+  /* Empty block */
+  if (head_lir == tail_lir) return;
+
+  /* Start from the second instruction */
+  for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
+
+    if (is_pseudo_opcode(this_lir->opcode)) continue;
+
+    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
+    /* Skip non-interesting instructions */
+    if ((this_lir->flags.is_nop == true) ||
+        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
+        !(target_flags & IS_LOAD)) {
+      continue;
+    }
+
+    uint64_t stop_use_all_mask = this_lir->use_mask;
+
+    if (cu_->instruction_set != kX86) {
+      /*
+       * Branches for null/range checks are marked with the true resource
+       * bits, and loads to Dalvik registers, constant pools, and non-alias
+       * locations are safe to be hoisted. So only mark the heap references
+       * conservatively here.
+       */
+      if (stop_use_all_mask & ENCODE_HEAP_REF) {
+        stop_use_all_mask |= GetPCUseDefEncoding();
+      }
+    }
+
+    /* Similar to the above, but only check for pure register dependencies */
+    uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
+    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+
+    int next_slot = 0;
+    bool stop_here = false;
+
+    /* Try to hoist the load to a good spot */
+    for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
+
+      /*
+       * Skip already dead instructions (whose dataflow information is
+       * outdated and misleading).
+       */
+      if (check_lir->flags.is_nop) continue;
+
+      uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
+      uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
+      stop_here = false;
+
+      /* Potential WAR alias seen - check the exact relation */
+      if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+        /* We can fully disambiguate Dalvik references */
+        if (alias_condition == ENCODE_DALVIK_REG) {
+          /* Must alias or partially overlap */
+          if ((check_lir->alias_info == this_lir->alias_info) ||
+            IsDalvikRegisterClobbered(this_lir, check_lir)) {
+            stop_here = true;
+          }
+        /* Conservatively treat all heap refs as may-alias */
+        } else {
+          DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
+          stop_here = true;
+        }
+        /* Memory content may be updated. Stop looking now. */
+        if (stop_here) {
+          prev_inst_list[next_slot++] = check_lir;
+          break;
+        }
+      }
+
+      if (stop_here == false) {
+        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
+                     check_lir);
+      }
+
+      /*
+       * Store the dependent or non-pseudo/independent instruction in the
+       * list.
+       */
+      if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
+        prev_inst_list[next_slot++] = check_lir;
+        if (next_slot == MAX_HOIST_DISTANCE) break;
+      }
+
+      /* Found a new place to put the load - move it here */
+      if (stop_here == true) {
+        DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir "HOIST STOP"));
+        break;
+      }
+    }
+
+    /*
+     * Reached the top - use head_lir as the dependent marker as all labels
+     * are barriers.
+     */
+    if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
+      prev_inst_list[next_slot++] = head_lir;
+    }
+
+    /*
+     * At least one independent instruction was found. Scan in the reverse
+     * direction to find a beneficial slot.
+     */
+    if (next_slot >= 2) {
+      int first_slot = next_slot - 2;
+      int slot;
+      LIR* dep_lir = prev_inst_list[next_slot-1];
+      /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
+      if (!is_pseudo_opcode(dep_lir->opcode) &&
+        (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
+        first_slot -= LDLD_DISTANCE;
+      }
+      /*
+       * Make sure we check slot >= 0 since first_slot may be negative
+       * when the loop is first entered.
+       */
+      for (slot = first_slot; slot >= 0; slot--) {
+        LIR* cur_lir = prev_inst_list[slot];
+        LIR* prev_lir = prev_inst_list[slot+1];
+
+        /* Check the highest instruction */
+        if (prev_lir->def_mask == ENCODE_ALL) {
+          /*
+           * If the first instruction is a load, don't hoist anything
+           * above it since it is unlikely to be beneficial.
+           */
+          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
+          /*
+           * If the remaining number of slots is less than LD_LATENCY,
+           * insert the hoisted load here.
+           */
+          if (slot < LD_LATENCY) break;
+        }
+
+        // Don't look across a barrier label
+        if ((prev_lir->opcode == kPseudoTargetLabel) ||
+            (prev_lir->opcode == kPseudoSafepointPC) ||
+            (prev_lir->opcode == kPseudoBarrier)) {
+          break;
+        }
+
+        /*
+         * Try to find two instructions with load/use dependency until
+         * the remaining instructions are less than LD_LATENCY.
+         */
+        bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
+            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
+        if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
+          break;
+        }
+      }
+
+      /* Found a slot to hoist to */
+      if (slot >= 0) {
+        LIR* cur_lir = prev_inst_list[slot];
+        LIR* new_load_lir =
+          static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+        *new_load_lir = *this_lir;
+        /*
+         * Insertion is guaranteed to succeed since check_lir
+         * is never the first LIR on the list
+         */
+        InsertLIRBefore(cur_lir, new_load_lir);
+        this_lir->flags.is_nop = true;
+      }
+    }
+  }
+}
+
+void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir)
+{
+  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
+    ApplyLoadStoreElimination(head_lir, tail_lir);
+  }
+  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
+    ApplyLoadHoisting(head_lir, tail_lir);
+  }
+}
+
+/*
+ * Nop any unconditional branches that go to the next instruction.
+ * Note: new redundant branches may be inserted later, and we'll
+ * use a check in final instruction assembly to nop those out.
+ */
+void Mir2Lir::RemoveRedundantBranches()
+{
+  LIR* this_lir;
+
+  for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
+
+    /* Branch to the next instruction */
+    if (IsUnconditionalBranch(this_lir)) {
+      LIR* next_lir = this_lir;
+
+      while (true) {
+        next_lir = NEXT_LIR(next_lir);
+
+        /*
+         * Is the branch target the next instruction?
+         */
+        if (next_lir == this_lir->target) {
+          this_lir->flags.is_nop = true;
+          break;
+        }
+
+        /*
+         * Found real useful stuff between the branch and the target.
+         * Need to explicitly check the last_lir_insn_ here because it
+         * might be the last real instruction.
+         */
+        if (!is_pseudo_opcode(next_lir->opcode) ||
+          (next_lir == last_lir_insn_))
+          break;
+      }
+    }
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips/README.mips b/compiler/dex/quick/mips/README.mips
new file mode 100644
index 0000000..061c157
--- /dev/null
+++ b/compiler/dex/quick/mips/README.mips
@@ -0,0 +1,57 @@
+               Notes on the Mips target (3/4/2012)
+               -----------------------------------
+
+Testing
+
+The initial implementation of Mips support in the compiler is untested on
+actual hardware, and as such should be expected to have many bugs.  However,
+the vast majority of code for Mips support is either shared with other
+tested targets, or was taken from the functional Mips JIT compiler.  The
+expectation is that when it is first tried out on actual hardware lots of
+small bugs will be flushed out, but it should not take long to get it
+solidly running.  The following areas are considered most likely to have
+problems that need to be addressed:
+
+    o Endianness.  Focus was on little-endian support, and if a big-endian
+      target is desired, you should pay particular attention to the
+      code generation for switch tables, fill array data, 64-bit
+      data handling and the register usage conventions.
+
+    o The memory model.  Verify that oatGenMemoryBarrier() generates the
+      appropriate flavor of sync.
+
+Register promotion
+
+The resource masks in the LIR structure are 64-bits wide, which is enough
+room to fully describe def/use info for Arm and x86 instructions.  However,
+the larger number of MIPS core and float registers renders this too small.
+Currently, the workaround for this limitation is to avoid using floating
+point registers 16-31.  These are the callee-save registers, which therefore
+means that no floating point promotion is allowed.  Among the solutions are:
+     o Expand the def/use mask (which, unfortunately, is a significant change)
+     o The Arm target uses 52 of the 64 bits, so we could support float
+       registers 16-27 without much effort.
+     o We could likely assign the 4 non-register bits (kDalvikReg, kLiteral,
+       kHeapRef & kMustNotAlias) to positions occuped by MIPS registers that
+       don't need def/use bits because they are never modified by code
+       subject to scheduling: r_K0, r_K1, r_SP, r_ZERO, r_S1 (rSELF).
+
+Branch delay slots
+
+Little to no attempt was made to fill branch delay slots.  Branch
+instructions in the encoding map are given a length of 8 bytes to include
+an implicit NOP.  It should not be too difficult to provide a slot-filling
+pass following successful assembly, but thought should be given to the
+design.  Branches are currently treated as scheduling barriers.  One
+simple solution would be to copy the instruction at branch targets to the
+slot and adjust the displacement.  However, given that code expansion is
+already a problem it would be preferable to use a more sophisticated
+scheduling solution.
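+
+A rough sketch of that simple approach (instructions made up for illustration):
+
+    before:   b    L1                after:   b    L1'
+              nop                             lw   $v0,0($a2)   <- copied from L1
+         L1:  lw   $v0,0($a2)            L1:  lw   $v0,0($a2)
+
+where L1' is the instruction following the one copied into the delay slot.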
+
+Code expansion
+
+Code expansion for the MIPS target is significantly higher than we see
+for Arm and x86.  It might make sense to replace the inline code generation
+for some of the more verbose Dalvik byte codes with subroutine calls to
+shared helper functions.
+
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
new file mode 100644
index 0000000..2482aa4
--- /dev/null
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+/*
+ * opcode: MipsOpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+                     k3, k3s, k3e, flags, name, fmt, size) \
+        {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+                    {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ *     0 -> operands[0] (dest)
+ *     1 -> operands[1] (src1)
+ *     2 -> operands[2] (src2)
+ *     3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ *     h -> 4-digit hex
+ *     d -> decimal
+ *     E -> decimal*4
+ *     F -> decimal*2
+ *     c -> branch condition (beq, bne, etc.)
+ *     t -> pc-relative target
+ *     T -> pc-region target
+ *     u -> 1st half of bl[x] target
+ *     v -> 2nd half of bl[x] target
+ *     R -> register list
+ *     s -> single precision floating point register
+ *     S -> double precision floating point register
+ *     m -> Thumb2 modified immediate
+ *     n -> complemented Thumb2 modified immediate
+ *     M -> Thumb2 16-bit zero-extended immediate
+ *     b -> 4-digit binary
+ *     N -> append a NOP
+ *
+ *  [!] escape.  To insert "!", use "!!"
+ */
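+/*
+ * Example (illustrative only): kMipsAddiu below uses fmt "!0r,!1r,0x!2h(!2d)",
+ * so an addiu with an immediate of 16 would be dumped roughly as
+ * "addiu $rd,$rs,0x0010(16)", where $rd and $rs stand for the register names
+ * printed by the target.
+ */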
+/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/*
+ * TUNING: We're currently punting on the branch delay slots.  All branch
+ * instructions in this map are given a size of 8, which during assembly
+ * is expanded to include a nop.  This scheme should be replaced with
+ * an assembler pass to fill those slots when possible.
+ */
+const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
+    ENCODING_MAP(kMips32BitData, 0x00000000,
+                 kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "data", "0x!0h(!0d)", 4),
+    ENCODING_MAP(kMipsAddiu, 0x24000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "addiu", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsAddu, 0x00000021,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "addu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsAnd, 0x00000024,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "and", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsAndi, 0x30000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "andi", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsB, 0x10000000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+                 "b", "!0t!0N", 8),
+    ENCODING_MAP(kMipsBal, 0x04110000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
+                 NEEDS_FIXUP, "bal", "!0t!0N", 8),
+    ENCODING_MAP(kMipsBeq, 0x10000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+                 NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
+    ENCODING_MAP(kMipsBeqz, 0x10000000, /* same as beq above with t = $zero */
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMipsBgez, 0x04010000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMipsBgtz, 0x1C000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMipsBlez, 0x18000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMipsBltz, 0x04000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMipsBnez, 0x14000000, /* same as bne below with t = $zero */
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMipsBne, 0x14000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+                 NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
+    ENCODING_MAP(kMipsDiv, 0x0000001a,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtBitBlt, 25, 21,
+                 kFmtBitBlt, 20, 16, IS_QUAD_OP | REG_DEF01 | REG_USE23,
+                 "div", "!2r,!3r", 4),
+#if __mips_isa_rev>=2
+    ENCODING_MAP(kMipsExt, 0x7c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
+                 kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
+                 "ext", "!0r,!1r,!2d,!3D", 4),
+#endif
+    ENCODING_MAP(kMipsJal, 0x0c000000,
+                 kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+                 "jal", "!0T(!0E)!0N", 8),
+    ENCODING_MAP(kMipsJalr, 0x00000009,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
+                 "jalr", "!0r,!1r!0N", 8),
+    ENCODING_MAP(kMipsJr, 0x00000008,
+                 kFmtBitBlt, 25, 21, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "jr", "!0r!0N", 8),
+    ENCODING_MAP(kMipsLahi, 0x3C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "lahi/lui", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMipsLalo, 0x34000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsLui, 0x3C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "lui", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMipsLb, 0x80000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lb", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsLbu, 0x90000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lbu", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsLh, 0x84000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lh", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsLhu, 0x94000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lhu", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsLw, 0x8C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lw", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsMfhi, 0x00000010,
+                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfhi", "!0r", 4),
+    ENCODING_MAP(kMipsMflo, 0x00000012,
+                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mflo", "!0r", 4),
+    ENCODING_MAP(kMipsMove, 0x00000025, /* or using zero reg */
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "move", "!0r,!1r", 4),
+    ENCODING_MAP(kMipsMovz, 0x0000000a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "movz", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsMul, 0x70000002,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsNop, 0x00000000,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "nop", ";", 4),
+    ENCODING_MAP(kMipsNor, 0x00000027, /* used for "not" too */
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "nor", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsOr, 0x00000025,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "or", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsOri, 0x34000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "ori", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsPref, 0xCC000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE2,
+                 "pref", "!0d,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsSb, 0xA0000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sb", "!0r,!1d(!2r)", 4),
+#if __mips_isa_rev>=2
+    ENCODING_MAP(kMipsSeb, 0x7c000420,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "seb", "!0r,!1r", 4),
+    ENCODING_MAP(kMipsSeh, 0x7c000620,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "seh", "!0r,!1r", 4),
+#endif
+    ENCODING_MAP(kMipsSh, 0xA4000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sh", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsSll, 0x00000000,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "sll", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsSllv, 0x00000004,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sllv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsSlt, 0x0000002a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "slt", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsSlti, 0x28000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "slti", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsSltu, 0x0000002b,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sltu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsSra, 0x00000003,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "sra", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsSrav, 0x00000007,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "srav", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsSrl, 0x00000002,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "srl", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsSrlv, 0x00000006,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "srlv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsSubu, 0x00000023, /* used for "neg" too */
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "subu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsSw, 0xAC000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sw", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsXor, 0x00000026,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "xor", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMipsXori, 0x38000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "xori", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMipsFadds, 0x46000000,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFsubs, 0x46000001,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFmuls, 0x46000002,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFdivs, 0x46000003,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMipsFaddd, 0x46200000,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFsubd, 0x46200001,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFmuld, 0x46200002,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFdivd, 0x46200003,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMipsFcvtsd, 0x46200020,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.d", "!0s,!1S", 4),
+    ENCODING_MAP(kMipsFcvtsw, 0x46800020,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.w", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFcvtds, 0x46000021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.s", "!0S,!1s", 4),
+    ENCODING_MAP(kMipsFcvtdw, 0x46800021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.w", "!0S,!1s", 4),
+    ENCODING_MAP(kMipsFcvtws, 0x46000024,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFcvtwd, 0x46200024,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.d", "!0s,!1S", 4),
+    ENCODING_MAP(kMipsFmovs, 0x46000006,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMipsFmovd, 0x46200006,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.d", "!0S,!1S", 4),
+    ENCODING_MAP(kMipsFlwc1, 0xC4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lwc1", "!0s,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsFldc1, 0xD4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "ldc1", "!0S,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsFswc1, 0xE4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "swc1", "!0s,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsFsdc1, 0xF4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sdc1", "!0S,!1d(!2r)", 4),
+    ENCODING_MAP(kMipsMfc1, 0x44000000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMipsMtc1, 0x44800000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+                 "mtc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMipsDelta, 0x27e00000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
+                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
+                 NEEDS_FIXUP, "addiu", "!0r,ra,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMipsDeltaHi, 0x3C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
+                 "lui", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMipsDeltaLo, 0x34000000,
+                 kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
+                 "ori", "!0r,!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMipsCurrPC, 0x04110001,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
+                 "addiu", "ra,pc,8", 4),
+    ENCODING_MAP(kMipsSync, 0x0000000f,
+                 kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "sync", ";", 4),
+    ENCODING_MAP(kMipsUndefined, 0x64000000,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "undefined", "", 4),
+};
+
+
+/*
+ * Convert a short-form branch to long form.  Hopefully, this won't happen
+ * very often because the PIC sequence is especially unfortunate.
+ *
+ * Orig conditional branch
+ * -----------------------
+ *      beq  rs,rt,target
+ *
+ * Long conditional branch
+ * -----------------------
+ *      bne  rs,rt,hop
+ *      bal  .+8   ; r_RA <- anchor
+ *      lui  r_AT, ((target-anchor) >> 16)
+ * anchor:
+ *      ori  r_AT, r_AT, ((target-anchor) & 0xffff)
+ *      addu r_AT, r_AT, r_RA
+ *      jr   r_AT
+ * hop:
+ *
+ * Orig unconditional branch
+ * -------------------------
+ *      b target
+ *
+ * Long unconditional branch
+ * -----------------------
+ *      bal  .+8   ; r_RA <- anchor
+ *      lui  r_AT, ((target-anchor) >> 16)
+ * anchor:
+ *      ori  r_AT, r_AT, ((target-anchor) & 0xffff)
+ *      addu r_AT, r_AT, r_RA
+ *      jr   r_AT
+ *
+ *
+ * NOTE: An out-of-range bal isn't supported because it should
+ * never happen with the current PIC model.
+ */
+void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir)
+{
+  // For conditional branches we'll need to reverse the sense
+  bool unconditional = false;
+  int opcode = lir->opcode;
+  int dalvik_offset = lir->dalvik_offset;
+  switch (opcode) {
+    case kMipsBal:
+      LOG(FATAL) << "long branch and link unsupported";
+    case kMipsB:
+      unconditional = true;
+      break;
+    case kMipsBeq:  opcode = kMipsBne; break;
+    case kMipsBne:  opcode = kMipsBeq; break;
+    case kMipsBeqz: opcode = kMipsBnez; break;
+    case kMipsBgez: opcode = kMipsBltz; break;
+    case kMipsBgtz: opcode = kMipsBlez; break;
+    case kMipsBlez: opcode = kMipsBgtz; break;
+    case kMipsBltz: opcode = kMipsBgez; break;
+    case kMipsBnez: opcode = kMipsBeqz; break;
+    default:
+      LOG(FATAL) << "Unexpected branch kind " << opcode;
+  }
+  LIR* hop_target = NULL;
+  if (!unconditional) {
+    hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
+    LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
+                            lir->operands[1], 0, 0, 0, hop_target);
+    InsertLIRBefore(lir, hop_branch);
+  }
+  LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
+  InsertLIRBefore(lir, curr_pc);
+  LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
+  LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0,
+                        reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+  InsertLIRBefore(lir, delta_hi);
+  InsertLIRBefore(lir, anchor);
+  LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0,
+                        reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+  InsertLIRBefore(lir, delta_lo);
+  LIR* addu = RawLIR(dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
+  InsertLIRBefore(lir, addu);
+  LIR* jr = RawLIR(dalvik_offset, kMipsJr, r_AT);
+  InsertLIRBefore(lir, jr);
+  if (!unconditional) {
+    InsertLIRBefore(lir, hop_target);
+  }
+  lir->flags.is_nop = true;
+}
+
+/*
+ * Assemble the LIR into binary instruction format.  Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.  In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr)
+{
+  LIR *lir;
+  AssemblerStatus res = kSuccess;  // Assume success
+
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+    if (lir->opcode < 0) {
+      continue;
+    }
+
+    if (lir->flags.is_nop) {
+      continue;
+    }
+
+    if (lir->flags.pcRelFixup) {
+      if (lir->opcode == kMipsDelta) {
+        /*
+         * The "Delta" pseudo-ops load the difference between
+         * two pc-relative locations into the target register
+         * found in operands[0].  The delta is determined by
+         * (label2 - label1), where label1 is a standard
+         * kPseudoTargetLabel and is stored in operands[2].
+         * If operands[3] is null, then label2 is a kPseudoTargetLabel
+         * and is found in lir->target.  If operands[3] is non-NULL,
+         * then it is a Switch/Data table.
+         */
+        int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
+        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+        int delta = offset2 - offset1;
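+        // The addiu immediate is a sign-extended 16-bit field, so the delta can be
+        // encoded directly only when it is non-negative and fits in 15 bits.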
+        if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
+          // Fits
+          lir->operands[1] = delta;
+        } else {
+          // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
+          LIR *new_delta_hi =
+              RawLIR(lir->dalvik_offset, kMipsDeltaHi,
+                     lir->operands[0], 0, lir->operands[2],
+                     lir->operands[3], 0, lir->target);
+          InsertLIRBefore(lir, new_delta_hi);
+          LIR *new_delta_lo =
+              RawLIR(lir->dalvik_offset, kMipsDeltaLo,
+                     lir->operands[0], 0, lir->operands[2],
+                     lir->operands[3], 0, lir->target);
+          InsertLIRBefore(lir, new_delta_lo);
+          LIR *new_addu =
+              RawLIR(lir->dalvik_offset, kMipsAddu,
+                     lir->operands[0], lir->operands[0], r_RA);
+          InsertLIRBefore(lir, new_addu);
+          lir->flags.is_nop = true;
+          res = kRetryAll;
+        }
+      } else if (lir->opcode == kMipsDeltaLo) {
+        int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
+        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+        int delta = offset2 - offset1;
+        lir->operands[1] = delta & 0xffff;
+      } else if (lir->opcode == kMipsDeltaHi) {
+        int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
+        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+        int delta = offset2 - offset1;
+        lir->operands[1] = (delta >> 16) & 0xffff;
+      } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
+        LIR *target_lir = lir->target;
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
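+        // The 16-bit branch immediate is a signed word offset (about +/-128KB of reach);
+        // out-of-range branches are rewritten into the long form and the whole pass retried.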
+        if (delta > 131068 || delta < -131069) {
+          res = kRetryAll;
+          ConvertShortToLongBranch(lir);
+        } else {
+          lir->operands[0] = delta >> 2;
+        }
+      } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
+        LIR *target_lir = lir->target;
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
+        if (delta > 131068 || delta < -131069) {
+          res = kRetryAll;
+          ConvertShortToLongBranch(lir);
+        } else {
+          lir->operands[1] = delta >> 2;
+        }
+      } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
+        LIR *target_lir = lir->target;
+        uintptr_t pc = lir->offset + 4;
+        uintptr_t target = target_lir->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
+        if (delta > 131068 || delta < -131069) {
+          res = kRetryAll;
+          ConvertShortToLongBranch(lir);
+        } else {
+          lir->operands[2] = delta >> 2;
+        }
+      } else if (lir->opcode == kMipsJal) {
+        uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
+        uintptr_t target = lir->operands[0];
+        /* ensure PC-region branch can be used */
+        DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
+        if (target & 0x3) {
+          LOG(FATAL) << "Jump target not multiple of 4: " << target;
+        }
+        lir->operands[0] = target >> 2;
+      } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
+        LIR *target_lir = lir->target;
+        uintptr_t target = start_addr + target_lir->offset;
+        lir->operands[1] = target >> 16;
+      } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
+        LIR *target_lir = lir->target;
+        uintptr_t target = start_addr + target_lir->offset;
+        lir->operands[2] = lir->operands[2] + target;
+      }
+    }
+
+    /*
+     * If one of the pc-relative instructions expanded we'll have
+     * to make another pass.  Don't bother to fully assemble the
+     * instruction.
+     */
+    if (res != kSuccess) {
+      continue;
+    }
+    const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
+    uint32_t bits = encoder->skeleton;
+    int i;
+    for (i = 0; i < 4; i++) {
+      uint32_t operand;
+      uint32_t value;
+      operand = lir->operands[i];
+      switch (encoder->field_loc[i].kind) {
+        case kFmtUnused:
+          break;
+        case kFmtBitBlt:
+          if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
+            value = operand;
+          } else {
+            value = (operand << encoder->field_loc[i].start) &
+                ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          }
+          bits |= value;
+          break;
+        case kFmtBlt5_2:
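+          // Write the same 5-bit register number into both field positions (rt and rs);
+          // kMipsDeltaLo relies on this for its "ori rX,rX,imm" form.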
+          value = (operand & 0x1f);
+          bits |= (value << encoder->field_loc[i].start);
+          bits |= (value << encoder->field_loc[i].end);
+          break;
+        case kFmtDfp: {
+          DCHECK(MIPS_DOUBLEREG(operand));
+          DCHECK_EQ((operand & 0x1), 0U);
+          value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+              ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          bits |= value;
+          break;
+        }
+        case kFmtSfp:
+          DCHECK(MIPS_SINGLEREG(operand));
+          value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+              ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          bits |= value;
+          break;
+        default:
+          LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
+      }
+    }
+    // We only support little-endian MIPS.
+    code_buffer_.push_back(bits & 0xff);
+    code_buffer_.push_back((bits >> 8) & 0xff);
+    code_buffer_.push_back((bits >> 16) & 0xff);
+    code_buffer_.push_back((bits >> 24) & 0xff);
+    // TUNING: replace with proper delay slot handling
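+    // Encodings marked with size 8 are branches/jumps; emit a nop behind them to fill the delay slot.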
+    if (encoder->size == 8) {
+      const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
+      uint32_t bits = encoder->skeleton;
+      code_buffer_.push_back(bits & 0xff);
+      code_buffer_.push_back((bits >> 8) & 0xff);
+      code_buffer_.push_back((bits >> 16) & 0xff);
+      code_buffer_.push_back((bits >> 24) & 0xff);
+    }
+  }
+  return res;
+}
+
+int MipsMir2Lir::GetInsnSize(LIR* lir)
+{
+  return EncodingMap[lir->opcode].size;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
new file mode 100644
index 0000000..eb0302e
--- /dev/null
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips ISA */
+
+#include "codegen_mips.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips_lir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
+                                 SpecialCaseHandler special_case)
+{
+  // TODO
+}
+
+/*
+ * The lack of pc-relative loads on Mips presents somewhat of a challenge
+ * for our PIC switch table strategy.  To materialize the current location
+ * we'll do a dummy JAL and reference our tables using r_RA as the
+ * base register.  Note that r_RA will be used both as the base to
+ * locate the switch table data and as the reference base for the switch
+ * target offsets stored in the table.  We'll use a special pseudo-instruction
+ * to represent the jal and trigger the construction of the
+ * switch table offsets (which will happen after final assembly and all
+ * labels are fixed).
+ *
+ * The test loop will look something like:
+ *
+ *   ori   rEnd, r_ZERO, #table_size  ; size in bytes
+ *   jal   BaseLabel         ; stores "return address" (BaseLabel) in r_RA
+ *   nop                     ; opportunistically fill
+ * BaseLabel:
+ *   addiu rBase, r_RA, <table> - <BaseLabel>  ; table relative to BaseLabel
+ *   addu  rEnd, rEnd, rBase                   ; end of table
+ *   lw    r_val, [rSP, v_reg_off]                ; Test Value
+ * loop:
+ *   beq   rBase, rEnd, done
+ *   lw    r_key, 0(rBase)
+ *   addu  rBase, 8
+ *   bne   r_val, r_key, loop
+ *   lw    r_disp, -4(rBase)
+ *   addu  r_RA, r_disp
+ *   jr    r_RA
+ * done:
+ *
+ */
+void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+                                  RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpSparseSwitchTable(table);
+  }
+  // Add the table to the list - we'll process it later
+  SwitchTable *tab_rec =
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int elements = table[1];
+  tab_rec->targets =
+      static_cast<LIR**>(arena_->NewMem(elements * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
+
+  // The table is composed of 8-byte key/disp pairs
+  int byte_size = elements * 8;
+
+  int size_hi = byte_size >> 16;
+  int size_lo = byte_size & 0xffff;
+
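+  // If the table byte size needs more than 16 bits, build it with a lui before the jal
+  // plus an ori in the delay slot; otherwise a single ori from $zero in the delay slot is enough.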
+  int rEnd = AllocTemp();
+  if (size_hi) {
+    NewLIR2(kMipsLui, rEnd, size_hi);
+  }
+  // Must prevent code motion for the curr pc pair
+  GenBarrier();  // Scheduling barrier
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
+  // Now, fill the branch delay slot
+  if (size_hi) {
+    NewLIR3(kMipsOri, rEnd, rEnd, size_lo);
+  } else {
+    NewLIR3(kMipsOri, rEnd, r_ZERO, size_lo);
+  }
+  GenBarrier();  // Scheduling barrier
+
+  // Construct BaseLabel and set up table base register
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
+  // Remember base label so offsets can be computed later
+  tab_rec->anchor = base_label;
+  int rBase = AllocTemp();
+  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+          reinterpret_cast<uintptr_t>(tab_rec));
+  OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);
+
+  // Grab switch test value
+  rl_src = LoadValue(rl_src, kCoreReg);
+
+  // Test loop
+  int r_key = AllocTemp();
+  LIR* loop_label = NewLIR0(kPseudoTargetLabel);
+  LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
+  LoadWordDisp(rBase, 0, r_key);
+  OpRegImm(kOpAdd, rBase, 8);
+  OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label);
+  int r_disp = AllocTemp();
+  LoadWordDisp(rBase, -4, r_disp);
+  OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
+  OpReg(kOpBx, r_RA);
+
+  // Loop exit
+  LIR* exit_label = NewLIR0(kPseudoTargetLabel);
+  exit_branch->target = exit_label;
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ *   lw    r_val
+ *   jal   BaseLabel         ; stores "return address" (BaseLabel) in r_RA
+ *   nop                     ; opportunistically fill
+ *   [subu  r_val, bias]      ; Remove bias if low_key != 0
+ *   bound check -> done
+ *   lw    r_disp, [r_RA, r_val]
+ *   addu  r_RA, r_disp
+ *   jr    r_RA
+ * done:
+ */
+void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+                                  RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpPackedSwitchTable(table);
+  }
+  // Add the table to the list - we'll process it later
+  SwitchTable *tab_rec =
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int size = table[1];
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
+
+  // Get the switch value
+  rl_src = LoadValue(rl_src, kCoreReg);
+
+  // Prepare the bias.  If too big, handle 1st stage here
+  int low_key = s4FromSwitchData(&table[2]);
+  bool large_bias = false;
+  int r_key;
+  if (low_key == 0) {
+    r_key = rl_src.low_reg;
+  } else if ((low_key & 0xffff) != low_key) {
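+    // The bias doesn't fit in a 16-bit immediate, so materialize it in a register now;
+    // the delay slot below then only needs a register-register subtract.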
+    r_key = AllocTemp();
+    LoadConstant(r_key, low_key);
+    large_bias = true;
+  } else {
+    r_key = AllocTemp();
+  }
+
+  // Must prevent code motion for the curr pc pair
+  GenBarrier();
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
+  // Now, fill the branch delay slot with bias strip
+  if (low_key == 0) {
+    NewLIR0(kMipsNop);
+  } else {
+    if (large_bias) {
+      OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key);
+    } else {
+      OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key);
+    }
+  }
+  GenBarrier();  // Scheduling barrier
+
+  // Construct BaseLabel and set up table base register
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
+  // Remember base label so offsets can be computed later
+  tab_rec->anchor = base_label;
+
+  // Bounds check - if < 0 or >= size continue following switch
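+  // A single unsigned compare against size-1 folds the negative and too-large cases into one branch.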
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+
+  // Materialize the table base pointer
+  int rBase = AllocTemp();
+  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+          reinterpret_cast<uintptr_t>(tab_rec));
+
+  // Load the displacement from the switch table
+  int r_disp = AllocTemp();
+  LoadBaseIndexed(rBase, r_key, r_disp, 2, kWord);
+
+  // Add to r_RA and go
+  OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
+  OpReg(kOpBx, r_RA);
+
+  /* branch_over target here */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+}
+
+/*
+ * Array data table format:
+ *  ushort ident = 0x0300   magic value
+ *  ushort width            width of each element in the table
+ *  uint   size             number of elements in the table
+ *  ubyte  data[size*width] table of data values (may contain a single-byte
+ *                          padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  // Add the table to the list - we'll process it later
+  FillArrayData *tab_rec =
+      reinterpret_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                      ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  uint16_t width = tab_rec->table[1];
+  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
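+  // Total payload bytes: size*width of data plus the 8-byte header (ident, width, size).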
+  tab_rec->size = (size * width) + 8;
+
+  fill_array_data_.Insert(tab_rec);
+
+  // Making a call - use explicit registers
+  FlushAllRegs();   /* Everything to home location */
+  LockCallTemps();
+  LoadValueDirectFixed(rl_src, rMIPS_ARG0);
+
+  // Must prevent code motion for the curr pc pair
+  GenBarrier();
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
+  // Now, fill the branch delay slot with the helper load
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+  GenBarrier();  // Scheduling barrier
+
+  // Construct BaseLabel and set up table base register
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
+
+  // Materialize a pointer to the fill data image
+  NewLIR4(kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
+          reinterpret_cast<uintptr_t>(tab_rec));
+
+  // And go...
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, r_tgt); // ( array*, fill_data* )
+  MarkSafepointPC(call_inst);
+}
+
+/*
+ * TODO: implement fast path to short-circuit thin-lock case
+ */
+void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
+  // Go expensive route - artLockObjectFromCode(self, obj);
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode));
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, r_tgt);
+  MarkSafepointPC(call_inst);
+}
+
+/*
+ * TODO: implement fast path to short-circuit thin-lock case
+ */
+void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
+  // Go expensive route - UnlockObjectFromCode(obj);
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, r_tgt);
+  MarkSafepointPC(call_inst);
+}
+
+void MipsMir2Lir::GenMoveException(RegLocation rl_dest)
+{
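+  // Load the pending exception from the Thread object, then clear the thread-local slot
+  // by storing zero back.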
+  int ex_offset = Thread::ExceptionOffset().Int32Value();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int reset_reg = AllocTemp();
+  LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg);
+  LoadConstant(reset_reg, 0);
+  StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
+  FreeTemp(reset_reg);
+  StoreValue(rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
+{
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  LoadWordDisp(rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
+                   kUnsignedByte);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
+}
+
+void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
+{
+  int spill_count = num_core_spills_ + num_fp_spills_;
+  /*
+   * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live.  Let the register
+   * allocation mechanism know so it doesn't try to use any of them when
+   * expanding the frame or flushing.  This leaves the utility
+   * code with a single temp register, which should be enough.
+   */
+  LockTemp(rMIPS_ARG0);
+  LockTemp(rMIPS_ARG1);
+  LockTemp(rMIPS_ARG2);
+  LockTemp(rMIPS_ARG3);
+
+  /*
+   * We can safely skip the stack overflow check if we're
+   * a leaf *and* our frame size < fudge factor.
+   */
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+      (static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes));
+  NewLIR0(kPseudoMethodEntry);
+  int check_reg = AllocTemp();
+  int new_sp = AllocTemp();
+  if (!skip_overflow_check) {
+    /* Load stack limit */
+    LoadWordDisp(rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
+  }
+  /* Spill core callee saves */
+  SpillCoreRegs();
+  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+  DCHECK_EQ(num_fp_spills_, 0);
+  if (!skip_overflow_check) {
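+    // Compute the prospective new SP and raise a stack-overflow throw if it falls below
+    // the stack limit loaded earlier, before committing the frame.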
+    OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_size_ - (spill_count * 4));
+    GenRegRegCheck(kCondCc, new_sp, check_reg, kThrowStackOverflow);
+    OpRegCopy(rMIPS_SP, new_sp);     // Establish stack
+  } else {
+    OpRegImm(kOpSub, rMIPS_SP, frame_size_ - (spill_count * 4));
+  }
+
+  FlushIns(ArgLocs, rl_method);
+
+  FreeTemp(rMIPS_ARG0);
+  FreeTemp(rMIPS_ARG1);
+  FreeTemp(rMIPS_ARG2);
+  FreeTemp(rMIPS_ARG3);
+}
+
+void MipsMir2Lir::GenExitSequence()
+{
+  /*
+   * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
+   * allocated by the register utilities as temps.
+   */
+  LockTemp(rMIPS_RET0);
+  LockTemp(rMIPS_RET1);
+
+  NewLIR0(kPseudoMethodExit);
+  UnSpillCoreRegs();
+  OpReg(kOpBx, r_RA);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
new file mode 100644
index 0000000..9723b899
--- /dev/null
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_DEX_QUICK_CODEGEN_MIPS_CODEGENMIPS_H_
+#define ART_SRC_DEX_QUICK_CODEGEN_MIPS_CODEGENMIPS_H_
+
+#include "dex/compiler_internals.h"
+#include "mips_lir.h"
+
+namespace art {
+
+class MipsMir2Lir : public Mir2Lir {
+  public:
+
+    MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+    // Required for target - codegen utilities.
+    bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    int LoadHelper(int offset);
+    LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg);
+    LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    LIR* LoadConstantNoClobber(int r_dest, int value);
+    LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg);
+    void MarkGCCard(int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    bool IsFpReg(int reg);
+    bool SameRegType(int reg1, int reg2);
+    int AllocTypedTemp(bool fp_hint, int reg_class);
+    int AllocTypedTempPair(bool fp_hint, int reg_class);
+    int S2d(int low_reg, int high_reg);
+    int TargetReg(SpecialTargetRegister reg);
+    RegisterInfo* GetRegInfo(int reg);
+    RegLocation GetReturnAlt();
+    RegLocation GetReturnWideAlt();
+    RegLocation LocCReturn();
+    RegLocation LocCReturnDouble();
+    RegLocation LocCReturnFloat();
+    RegLocation LocCReturnWide();
+    uint32_t FpRegMask();
+    uint64_t GetRegMaskCommon(int reg);
+    void AdjustSpillMask();
+    void ClobberCalleeSave();
+    void FlushReg(int reg);
+    void FlushRegWide(int reg1, int reg2);
+    void FreeCallTemps();
+    void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    void LockCallTemps();
+    void MarkPreservedSingle(int v_reg, int reg);
+    void CompilerInitializeRegAlloc();
+
+    // Required for target - miscellaneous.
+    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    void SetupTargetResourceMasks(LIR* lir);
+    const char* GetTargetInstFmt(int opcode);
+    const char* GetTargetInstName(int opcode);
+    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    uint64_t GetPCUseDefEncoding();
+    uint64_t GetTargetInstFlags(int opcode);
+    int GetInsnSize(LIR* lir);
+    bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2);
+    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
+                                RegLocation rl_src, int scale);
+    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_dest, int scale);
+    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift);
+    void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    bool GenInlinedSqrt(CallInfo* info);
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenDivZeroCheck(int reg_lo, int reg_hi);
+    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    void GenExitSequence();
+    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    void GenSelect(BasicBlock* bb, MIR* mir);
+    void GenMemBarrier(MemBarrierKind barrier_kind);
+    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    void GenMoveException(RegLocation rl_dest);
+    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit);
+    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+
+    // Required for target - single operation generators.
+    LIR* OpUnconditionalBranch(LIR* target);
+    LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    LIR* OpFpRegCopy(int r_dest, int r_src);
+    LIR* OpIT(ConditionCode cond, const char* guide);
+    LIR* OpMem(OpKind op, int rBase, int disp);
+    LIR* OpPcRelLoad(int reg, LIR* target);
+    LIR* OpReg(OpKind op, int r_dest_src);
+    LIR* OpRegCopy(int r_dest, int r_src);
+    LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    LIR* OpTestSuspend(LIR* target);
+    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpVldm(int rBase, int count);
+    LIR* OpVstm(int rBase, int count);
+    void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    void OpTlsCmp(int offset, int val);
+
+    LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
+                          int s_reg);
+    LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+    void SpillCoreRegs();
+    void UnSpillCoreRegs();
+    static const MipsEncodingMap EncodingMap[kMipsLast];
+    bool InexpensiveConstantInt(int32_t value);
+    bool InexpensiveConstantFloat(int32_t value);
+    bool InexpensiveConstantLong(int64_t value);
+    bool InexpensiveConstantDouble(int64_t value);
+
+  private:
+    void ConvertShortToLongBranch(LIR* lir);
+
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_DEX_QUICK_CODEGEN_MIPS_CODEGENMIPS_H_
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
new file mode 100644
index 0000000..8581d5b
--- /dev/null
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips_lir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  int op = kMipsNop;
+  RegLocation rl_result;
+
+  /*
+   * Don't attempt to optimize register usage since these opcodes call out to
+   * the handlers.
+   */
+  switch (opcode) {
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::ADD_FLOAT:
+      op = kMipsFadds;
+      break;
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT:
+      op = kMipsFsubs;
+      break;
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT:
+      op = kMipsFdivs;
+      break;
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT:
+      op = kMipsFmuls;
+      break;
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_FLOAT:
+      GenNegFloat(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+  StoreValue(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  int op = kMipsNop;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::ADD_DOUBLE:
+      op = kMipsFaddd;
+      break;
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE:
+      op = kMipsFsubd;
+      break;
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE:
+      op = kMipsFdivd;
+      break;
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE:
+      op = kMipsFmuld;
+      break;
+    case Instruction::REM_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_DOUBLE:
+      GenNegDouble(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unpexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
+  DCHECK(rl_src1.wide);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
+  DCHECK(rl_src2.wide);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_result.wide);
+  NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+          S2d(rl_src2.low_reg, rl_src2.high_reg));
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+                                RegLocation rl_src)
+{
+  int op = kMipsNop;
+  int src_reg;
+  RegLocation rl_result;
+  switch (opcode) {
+    case Instruction::INT_TO_FLOAT:
+      op = kMipsFcvtsw;
+      break;
+    case Instruction::DOUBLE_TO_FLOAT:
+      op = kMipsFcvtsd;
+      break;
+    case Instruction::FLOAT_TO_DOUBLE:
+      op = kMipsFcvtds;
+      break;
+    case Instruction::INT_TO_DOUBLE:
+      op = kMipsFcvtdw;
+      break;
+    case Instruction::FLOAT_TO_INT:
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
+      return;
+    case Instruction::DOUBLE_TO_INT:
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
+      return;
+    case Instruction::LONG_TO_DOUBLE:
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      return;
+    case Instruction::FLOAT_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      return;
+    case Instruction::LONG_TO_FLOAT:
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      return;
+    case Instruction::DOUBLE_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  if (rl_src.wide) {
+    rl_src = LoadValueWide(rl_src, kFPReg);
+    src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+  } else {
+    rl_src = LoadValue(rl_src, kFPReg);
+    src_reg = rl_src.low_reg;
+  }
+  if (rl_dest.wide) {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
+{
+  bool wide = true;
+  int offset = -1; // Make gcc happy.
+
+  switch (opcode) {
+    case Instruction::CMPL_FLOAT:
+      offset = ENTRYPOINT_OFFSET(pCmplFloat);
+      wide = false;
+      break;
+    case Instruction::CMPG_FLOAT:
+      offset = ENTRYPOINT_OFFSET(pCmpgFloat);
+      wide = false;
+      break;
+    case Instruction::CMPL_DOUBLE:
+      offset = ENTRYPOINT_OFFSET(pCmplDouble);
+      break;
+    case Instruction::CMPG_DOUBLE:
+      offset = ENTRYPOINT_OFFSET(pCmpgDouble);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  FlushAllRegs();
+  LockCallTemps();
+  if (wide) {
+    LoadValueDirectWideFixed(rl_src1, rMIPS_FARG0, rMIPS_FARG1);
+    LoadValueDirectWideFixed(rl_src2, rMIPS_FARG2, rMIPS_FARG3);
+  } else {
+    LoadValueDirectFixed(rl_src1, rMIPS_FARG0);
+    LoadValueDirectFixed(rl_src2, rMIPS_FARG2);
+  }
+  int r_tgt = LoadHelper(offset);
+  // NOTE: not a safepoint
+  OpReg(kOpBlx, r_tgt);
+  RegLocation rl_result = GetReturn(false);
+  StoreValue(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
+                                bool gt_bias, bool is_double)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
+}
+
+void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
+{
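+  // Flip the IEEE-754 sign bit by adding 0x80000000 in a core register; no FP instruction is needed.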
+  RegLocation rl_result;
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+  StoreValue(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
+{
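+  // Flip the sign bit in the high word of the double pair; the low word is copied through unchanged.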
+  RegLocation rl_result;
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+bool MipsMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
+{
+  // TODO: need Mips implementation
+  return false;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
new file mode 100644
index 0000000..8bfc4e1
--- /dev/null
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips ISA */
+
+#include "codegen_mips.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips_lir.h"
+#include "mirror/array.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+/*
+ * Compare two 64-bit values
+ *    x = y     return  0
+ *    x < y     return -1
+ *    x > y     return  1
+ *
+ *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
+ *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+ *    subu  res, t1, t0             # res = -1:1:0 for [ < > = ]
+ *    bnez  res, finish
+ *    sltu  t0, x.lo, y.lo
+ *    sgtu  t1, x.lo, y.lo
+ *    subu  res, t1, t0
+ * finish:
+ *
+ */
+void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
+{
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  int t0 = AllocTemp();
+  int t1 = AllocTemp();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR3(kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
+  NewLIR3(kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
+  NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+  LIR* branch = OpCmpImmBranch(kCondNe, rl_result.low_reg, 0, NULL);
+  NewLIR3(kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
+  NewLIR3(kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
+  NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+  FreeTemp(t0);
+  FreeTemp(t1);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch->target = target;
+  StoreValue(rl_dest, rl_result);
+}
+
+LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
+                              LIR* target)
+{
+  LIR* branch;
+  MipsOpCode slt_op;
+  MipsOpCode br_op;
+  bool cmp_zero = false;
+  bool swapped = false;
+  switch (cond) {
+    case kCondEq:
+      br_op = kMipsBeq;
+      cmp_zero = true;
+      break;
+    case kCondNe:
+      br_op = kMipsBne;
+      cmp_zero = true;
+      break;
+    case kCondCc:
+      slt_op = kMipsSltu;
+      br_op = kMipsBnez;
+      break;
+    case kCondCs:
+      slt_op = kMipsSltu;
+      br_op = kMipsBeqz;
+      break;
+    case kCondGe:
+      slt_op = kMipsSlt;
+      br_op = kMipsBeqz;
+      break;
+    case kCondGt:
+      slt_op = kMipsSlt;
+      br_op = kMipsBnez;
+      swapped = true;
+      break;
+    case kCondLe:
+      slt_op = kMipsSlt;
+      br_op = kMipsBeqz;
+      swapped = true;
+      break;
+    case kCondLt:
+      slt_op = kMipsSlt;
+      br_op = kMipsBnez;
+      break;
+    case kCondHi:  // Gtu
+      slt_op = kMipsSltu;
+      br_op = kMipsBnez;
+      swapped = true;
+      break;
+    default:
+      LOG(FATAL) << "No support for ConditionCode: " << cond;
+      return NULL;
+  }
+  if (cmp_zero) {
+    branch = NewLIR2(br_op, src1, src2);
+  } else {
+    int t_reg = AllocTemp();
+    if (swapped) {
+      NewLIR3(slt_op, t_reg, src2, src1);
+    } else {
+      NewLIR3(slt_op, t_reg, src1, src2);
+    }
+    branch = NewLIR1(br_op, t_reg);
+    FreeTemp(t_reg);
+  }
+  branch->target = target;
+  return branch;
+}
+
+LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
+                                 int check_value, LIR* target)
+{
+  LIR* branch;
+  if (check_value != 0) {
+    // TUNING: handle s16 & kCondLt/Mi case using slti
+    int t_reg = AllocTemp();
+    LoadConstant(t_reg, check_value);
+    branch = OpCmpBranch(cond, reg, t_reg, target);
+    FreeTemp(t_reg);
+    return branch;
+  }
+  MipsOpCode opc;
+  switch (cond) {
+    case kCondEq: opc = kMipsBeqz; break;
+    case kCondGe: opc = kMipsBgez; break;
+    case kCondGt: opc = kMipsBgtz; break;
+    case kCondLe: opc = kMipsBlez; break;
+    // case kCondMi:
+    case kCondLt: opc = kMipsBltz; break;
+    case kCondNe: opc = kMipsBnez; break;
+    default:
+      // Tuning: use slti when applicable
+      int t_reg = AllocTemp();
+      LoadConstant(t_reg, check_value);
+      branch = OpCmpBranch(cond, reg, t_reg, target);
+      FreeTemp(t_reg);
+      return branch;
+  }
+  branch = NewLIR1(opc, reg);
+  branch->target = target;
+  return branch;
+}
+
+LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
+{
+  if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src))
+    return OpFpRegCopy(r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
+            r_dest, r_src);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src)
+{
+  LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
+  return res;
+}
+
+void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
+                                int src_hi)
+{
+  bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi);
+  bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi);
+  assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi));
+  assert(MIPS_FPREG(dest_lo) == MIPS_FPREG(dest_hi));
+  if (dest_fp) {
+    if (src_fp) {
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+    } else {
+       /* note the operands are swapped for the mtc1 instr */
+      NewLIR2(kMipsMtc1, src_lo, dest_lo);
+      NewLIR2(kMipsMtc1, src_hi, dest_hi);
+    }
+  } else {
+    if (src_fp) {
+      NewLIR2(kMipsMfc1, dest_lo, src_lo);
+      NewLIR2(kMipsMfc1, dest_hi, src_hi);
+    } else {
+      // Handle overlap
+      if (src_hi == dest_lo) {
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+      } else {
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+      }
+    }
+  }
+}
+
+void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
+void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
+}
+
+LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code,
+                    int reg1, int base, int offset, ThrowKind kind)
+{
+  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
+  return NULL;
+}
+
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
+                                    bool is_div)
+{
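+  // MIPS div leaves the quotient in LO and the remainder in HI; mflo/mfhi
+  // below select whichever half the caller asked for.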
+  NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (is_div) {
+    NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+  } else {
+    NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+  }
+  return rl_result;
+}
+
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
+                                       bool is_div)
+{
+  int t_reg = AllocTemp();
+  NewLIR3(kMipsAddiu, t_reg, r_ZERO, lit);
+  NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (is_div) {
+    NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+  } else {
+    NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+  }
+  FreeTemp(t_reg);
+  return rl_result;
+}
+
+void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
+{
+  LOG(FATAL) << "Unexpected use of OpLea for Arm";
+}
+
+void MipsMir2Lir::OpTlsCmp(int offset, int val)
+{
+  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
+}
+
+bool MipsMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
+  return false;
+}
+
+bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
+  return false;
+}
+
+LIR* MipsMir2Lir::OpPcRelLoad(int reg, LIR* target) {
+  LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::OpVldm(int rBase, int count)
+{
+  LOG(FATAL) << "Unexpected use of OpVldm for Mips";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::OpVstm(int rBase, int count)
+{
+  LOG(FATAL) << "Unexpected use of OpVstm for Mips";
+  return NULL;
+}
+
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+                                                RegLocation rl_result, int lit,
+                                                int first_bit, int second_bit)
+{
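+  // Computes rl_src * ((1 << first_bit) + (1 << second_bit)): add the copy
+  // shifted by (second_bit - first_bit), then shift the sum left by first_bit.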
+  int t_reg = AllocTemp();
+  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  FreeTemp(t_reg);
+  if (first_bit != 0) {
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+  }
+}
+
+void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
+{
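+  // A 64-bit value is zero exactly when the OR of its two halves is zero,
+  // so a single compare of the OR against 0 suffices.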
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
+  FreeTemp(t_reg);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* MipsMir2Lir::OpTestSuspend(LIR* target)
+{
+  OpRegImm(kOpSub, rMIPS_SUSPEND, 1);
+  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
+}
+
+// Decrement register and branch on condition
+LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
+{
+  OpRegImm(kOpSub, reg, 1);
+  return OpCmpImmBranch(c_code, reg, 0, target);
+}
+
+bool MipsMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
+                                     RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+  LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
+  return false;
+}
+
+LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide)
+{
+  LOG(FATAL) << "Unexpected use of OpIT in Mips";
+  return NULL;
+}
+
+void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenMulLong for Mips";
+}
+
+void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
+{
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  /*
+   *  [v1 v0] =  [a1 a0] + [a3 a2];
+   *  addu v0,a2,a0
+   *  addu t1,a3,a1
+   *  sltu v1,v0,a2
+   *  addu v1,v1,t1
+   */
+
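+  // The unsigned compare (sltu) of the low-word sum against one of the
+  // addends yields the carry to propagate into the high word.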
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
+  NewLIR3(kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
+  OpRegRegReg(kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+  FreeTemp(t_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
+{
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  /*
+   *  [v1 v0] =  [a1 a0] - [a3 a2];
+   *  sltu  t1,a0,a2
+   *  subu  v0,a0,a2
+   *  subu  v1,a1,a3
+   *  subu  v1,v1,t1
+   */
+
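+  // The unsigned compare (sltu) of the two low words yields the borrow to
+  // subtract from the high-word difference.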
+  int t_reg = AllocTemp();
+  NewLIR3(kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+  OpRegRegReg(kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+  OpRegRegReg(kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+  OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+  FreeTemp(t_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
+{
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  /*
+   *  [v1 v0] =  -[a1 a0]
+   *  negu  v0,a0
+   *  negu  v1,a1
+   *  sltu  t1,zero,v0
+   *  subu  v1,v1,t1
+   */
+
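+  // If the negated low word is non-zero, a borrow (computed with sltu
+  // against zero) must be subtracted from the negated high word.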
+  OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
+  OpRegReg(kOpNeg, rl_result.high_reg, rl_src.high_reg);
+  int t_reg = AllocTemp();
+  NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
+  OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+  FreeTemp(t_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
+}
+
+void MipsMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
+}
+
+void MipsMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
+}
+
+/*
+ * Generate array load
+ */
+void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+  RegLocation rl_result;
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  if (size == kLong || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  int reg_ptr = AllocTemp();
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  int reg_len = INVALID_REG;
+  if (needs_range_check) {
+    reg_len = AllocTemp();
+    /* Get len */
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+  }
+  /* reg_ptr -> array data */
+  OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+  FreeTemp(rl_array.low_reg);
+  if ((size == kLong) || (size == kDouble)) {
+    if (scale) {
+      int r_new_index = AllocTemp();
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+      OpRegReg(kOpAdd, reg_ptr, r_new_index);
+      FreeTemp(r_new_index);
+    } else {
+      OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+    }
+    FreeTemp(rl_index.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    if (needs_range_check) {
+      // TODO: change kCondCS to a more meaningful name, is the sense of
+      // carry-set/clear flipped?
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
+    }
+    LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+
+    FreeTemp(reg_ptr);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    if (needs_range_check) {
+      // TODO: change kCondCS to a more meaningful name, is the sense of
+      // carry-set/clear flipped?
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
+    }
+    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+
+    FreeTemp(reg_ptr);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * Generate array store
+ */
+void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+
+  if (size == kLong || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+  int reg_ptr = INVALID_REG;
+  if (IsTemp(rl_array.low_reg)) {
+    Clobber(rl_array.low_reg);
+    reg_ptr = rl_array.low_reg;
+  } else {
+    reg_ptr = AllocTemp();
+    OpRegCopy(reg_ptr, rl_array.low_reg);
+  }
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  int reg_len = INVALID_REG;
+  if (needs_range_check) {
+    reg_len = AllocTemp();
+    // NOTE: max live temps (4) here.
+    /* Get len */
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+  }
+  /* reg_ptr -> array data */
+  OpRegImm(kOpAdd, reg_ptr, data_offset);
+  /* at this point, reg_ptr points to the array data; 2 live temps */
+  if ((size == kLong) || (size == kDouble)) {
+    // TUNING: specific wide routine that can handle fp regs.
+    if (scale) {
+      int r_new_index = AllocTemp();
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+      OpRegReg(kOpAdd, reg_ptr, r_new_index);
+      FreeTemp(r_new_index);
+    } else {
+      OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+    }
+    rl_src = LoadValueWide(rl_src, reg_class);
+
+    if (needs_range_check) {
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
+    }
+
+    StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+
+    FreeTemp(reg_ptr);
+  } else {
+    rl_src = LoadValue(rl_src, reg_class);
+    if (needs_range_check) {
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
+    }
+    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+                     scale, size);
+  }
+}
+
+/*
+ * Generate array store of an object reference (the element type is
+ * checked against the array's component type at runtime).
+ */
+void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
+
+  int r_value = TargetReg(kArg0);  // Register holding value
+  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
+  int r_array = TargetReg(kArg2);  // Register holding array
+  int r_index = TargetReg(kArg3);  // Register holding index into array
+
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
+
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+
+  // Store of null?
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
+
+  // Get the array's class.
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+                          r_array_class, true);
+  // Redo LoadValues in case they didn't survive the call.
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
+  r_array_class = INVALID_REG;
+
+  // Branch here if value to be stored == null
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  null_value_check->target = target;
+
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  int reg_len = INVALID_REG;
+  if (needs_range_check) {
+    reg_len = TargetReg(kArg1);
+    LoadWordDisp(r_array, len_offset, reg_len);  // Get len
+  }
+  /* r_ptr -> array data */
+  int r_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, r_ptr, r_array, data_offset);
+  if (needs_range_check) {
+    GenRegRegCheck(kCondCs, r_index, reg_len, kThrowArrayBounds);
+  }
+  StoreBaseIndexed(r_ptr, r_index, r_value, scale, kWord);
+  FreeTemp(r_ptr);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
+  }
+}
+
+void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                    RegLocation rl_src1, RegLocation rl_shift)
+{
+  // Default implementation is just to ignore the constant case.
+  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+}
+
+void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
+                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  // Default - bail to non-const handler.
+  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
new file mode 100644
index 0000000..ceab9ab
--- /dev/null
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIPS_MIPSLIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIPS_MIPSLIR_H_
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions.
+ *
+ * zero is always the value 0
+ * at is scratch (normally used as temp reg by assembler)
+ * v0, v1 are scratch (normally hold subroutine return values)
+ * a0-a3 are scratch (normally hold subroutine arguments)
+ * t0-t8 are scratch
+ * t9 is scratch (normally used for function calls)
+ * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
+ * s1 (rMIPS_SELF) is reserved [holds current &Thread]
+ * s2-s7 are callee save (promotion target)
+ * k0, k1 are reserved for use by interrupt handlers
+ * gp is reserved for global pointer
+ * sp is reserved
+ * s8 is callee save (promotion target)
+ * ra is scratch (normally holds the return addr)
+ *
+ * Preserved across C calls: s0-s8
+ * Trashed across C calls: at, v0-v1, a0-a3, t0-t9, gp, ra
+ *
+ * Floating point registers
+ * NOTE: there are 32 fp registers (16 df pairs), but currently
+ *       only 16 fp registers (8 df pairs) are supported.
+ * f0-f15
+ * df0-df7, where df0={f0,f1}, df1={f2,f3}, ... , df7={f14,f15}
+ *
+ * f0-f15 (df0-df7) trashed across C calls
+ *
+ * For mips32 code use:
+ *      a0-a3 to hold operands
+ *      v0-v1 to hold results
+ *      t0-t9 for temps
+ *
+ * Every jump/branch instruction has a delay slot after it.
+ *
+ *  Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1]              |  {Note: resides in caller's frame}
+ * |       .                |
+ * | IN[0]                  |
+ * | caller's Method*       |
+ * +========================+  {Note: start of callee's frame}
+ * | spill region           |  {variable sized - will include lr if non-leaf.}
+ * +------------------------+
+ * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1]            |
+ * | V[locals-2]            |
+ * |      .                 |
+ * |      .                 |
+ * | V[1]                   |
+ * | V[0]                   |
+ * +------------------------+
+ * |  0 to 3 words padding  |
+ * +------------------------+
+ * | OUT[outs-1]            |
+ * | OUT[outs-2]            |
+ * |       .                |
+ * | OUT[0]                 |
+ * | cur_method*            | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+// Offset to distinguish FP regs.
+#define MIPS_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define MIPS_FP_DOUBLE 64
+// Offset to distinguish the extra regs.
+#define MIPS_EXTRA_REG_OFFSET 128
+// Reg types.
+#define MIPS_REGTYPE(x) (x & (MIPS_FP_REG_OFFSET | MIPS_FP_DOUBLE))
+#define MIPS_FPREG(x) ((x & MIPS_FP_REG_OFFSET) == MIPS_FP_REG_OFFSET)
+#define MIPS_EXTRAREG(x) ((x & MIPS_EXTRA_REG_OFFSET) == MIPS_EXTRA_REG_OFFSET)
+#define MIPS_DOUBLEREG(x) ((x & MIPS_FP_DOUBLE) == MIPS_FP_DOUBLE)
+#define MIPS_SINGLEREG(x) (MIPS_FPREG(x) && !MIPS_DOUBLEREG(x))
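+// For example, r_F3 satisfies MIPS_FPREG and MIPS_SINGLEREG, while r_DF1
+// satisfies MIPS_FPREG and MIPS_DOUBLEREG.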
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * name a double, but we require both names to be passed so that
+ * asserts can verify the pair is consecutive if significant rework
+ * is done in this area.  It is also a good reminder in the calling
+ * code that reg locations always describe doubles as a pair of singles.
+ */
+#define MIPS_S2D(x,y) ((x) | MIPS_FP_DOUBLE)
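+// For example, MIPS_S2D(r_F0, r_F1) yields r_DF0, the double overlaying
+// the {f0, f1} pair.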
+// Mask to strip off fp flags.
+#define MIPS_FP_REG_MASK (MIPS_FP_REG_OFFSET-1)
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define LOWORD_OFFSET 0
+#define HIWORD_OFFSET 4
+#define r_ARG0 r_A0
+#define r_ARG1 r_A1
+#define r_ARG2 r_A2
+#define r_ARG3 r_A3
+#define r_RESULT0 r_V0
+#define r_RESULT1 r_V1
+#else
+#define LOWORD_OFFSET 4
+#define HIWORD_OFFSET 0
+#define r_ARG0 r_A1
+#define r_ARG1 r_A0
+#define r_ARG2 r_A3
+#define r_ARG3 r_A2
+#define r_RESULT0 r_V1
+#define r_RESULT1 r_V0
+#endif
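+// The big-endian aliases are swapped so that r_ARG0/r_RESULT0 always name the
+// register holding the low word of a 64-bit value (see MIPS_LOC_C_RETURN_WIDE below).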
+
+// These are the same for both big and little endian.
+#define r_FARG0 r_F12
+#define r_FARG1 r_F13
+#define r_FARG2 r_F14
+#define r_FARG3 r_F15
+#define r_FRESULT0 r_F0
+#define r_FRESULT1 r_F1
+
+// Regs not used for Mips.
+#define rMIPS_LR INVALID_REG
+#define rMIPS_PC INVALID_REG
+
+// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
+#define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_V0, INVALID_REG, \
+                           INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_FRESULT0, \
+                                 INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r_RESULT0, \
+                                r_RESULT1, INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r_FRESULT0,\
+                                  r_FRESULT1, INVALID_SREG, INVALID_SREG}
+
+enum MipsResourceEncodingPos {
+  kMipsGPReg0   = 0,
+  kMipsRegSP    = 29,
+  kMipsRegLR    = 31,
+  kMipsFPReg0   = 32, // only 16 fp regs supported currently.
+  kMipsFPRegEnd   = 48,
+  kMipsRegHI    = kMipsFPRegEnd,
+  kMipsRegLO,
+  kMipsRegPC,
+  kMipsRegEnd   = 51,
+};
+
+#define ENCODE_MIPS_REG_LIST(N)      (static_cast<uint64_t>(N))
+#define ENCODE_MIPS_REG_SP           (1ULL << kMipsRegSP)
+#define ENCODE_MIPS_REG_LR           (1ULL << kMipsRegLR)
+#define ENCODE_MIPS_REG_PC           (1ULL << kMipsRegPC)
+
+enum MipsNativeRegisterPool {
+  r_ZERO = 0,
+  r_AT = 1,
+  r_V0 = 2,
+  r_V1 = 3,
+  r_A0 = 4,
+  r_A1 = 5,
+  r_A2 = 6,
+  r_A3 = 7,
+  r_T0 = 8,
+  r_T1 = 9,
+  r_T2 = 10,
+  r_T3 = 11,
+  r_T4 = 12,
+  r_T5 = 13,
+  r_T6 = 14,
+  r_T7 = 15,
+  r_S0 = 16,
+  r_S1 = 17,
+  r_S2 = 18,
+  r_S3 = 19,
+  r_S4 = 20,
+  r_S5 = 21,
+  r_S6 = 22,
+  r_S7 = 23,
+  r_T8 = 24,
+  r_T9 = 25,
+  r_K0 = 26,
+  r_K1 = 27,
+  r_GP = 28,
+  r_SP = 29,
+  r_FP = 30,
+  r_RA = 31,
+
+  r_F0 = 0 + MIPS_FP_REG_OFFSET,
+  r_F1,
+  r_F2,
+  r_F3,
+  r_F4,
+  r_F5,
+  r_F6,
+  r_F7,
+  r_F8,
+  r_F9,
+  r_F10,
+  r_F11,
+  r_F12,
+  r_F13,
+  r_F14,
+  r_F15,
+#if 0
+  /*
+   * TODO: The shared resource mask doesn't have enough bit positions to describe all
+   * MIPS registers.  Expand it and enable use of fp registers 16 through 31.
+   */
+  r_F16,
+  r_F17,
+  r_F18,
+  r_F19,
+  r_F20,
+  r_F21,
+  r_F22,
+  r_F23,
+  r_F24,
+  r_F25,
+  r_F26,
+  r_F27,
+  r_F28,
+  r_F29,
+  r_F30,
+  r_F31,
+#endif
+  r_DF0 = r_F0 + MIPS_FP_DOUBLE,
+  r_DF1 = r_F2 + MIPS_FP_DOUBLE,
+  r_DF2 = r_F4 + MIPS_FP_DOUBLE,
+  r_DF3 = r_F6 + MIPS_FP_DOUBLE,
+  r_DF4 = r_F8 + MIPS_FP_DOUBLE,
+  r_DF5 = r_F10 + MIPS_FP_DOUBLE,
+  r_DF6 = r_F12 + MIPS_FP_DOUBLE,
+  r_DF7 = r_F14 + MIPS_FP_DOUBLE,
+#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
+  r_DF8 = r_F16 + MIPS_FP_DOUBLE,
+  r_DF9 = r_F18 + MIPS_FP_DOUBLE,
+  r_DF10 = r_F20 + MIPS_FP_DOUBLE,
+  r_DF11 = r_F22 + MIPS_FP_DOUBLE,
+  r_DF12 = r_F24 + MIPS_FP_DOUBLE,
+  r_DF13 = r_F26 + MIPS_FP_DOUBLE,
+  r_DF14 = r_F28 + MIPS_FP_DOUBLE,
+  r_DF15 = r_F30 + MIPS_FP_DOUBLE,
+#endif
+  r_HI = MIPS_EXTRA_REG_OFFSET,
+  r_LO,
+  r_PC,
+};
+
+#define rMIPS_SUSPEND r_S0
+#define rMIPS_SELF r_S1
+#define rMIPS_SP r_SP
+#define rMIPS_ARG0 r_ARG0
+#define rMIPS_ARG1 r_ARG1
+#define rMIPS_ARG2 r_ARG2
+#define rMIPS_ARG3 r_ARG3
+#define rMIPS_FARG0 r_FARG0
+#define rMIPS_FARG1 r_FARG1
+#define rMIPS_FARG2 r_FARG2
+#define rMIPS_FARG3 r_FARG3
+#define rMIPS_RET0 r_RESULT0
+#define rMIPS_RET1 r_RESULT1
+#define rMIPS_INVOKE_TGT r_T9
+#define rMIPS_COUNT INVALID_REG
+
+enum MipsShiftEncodings {
+  kMipsLsl = 0x0,
+  kMipsLsr = 0x1,
+  kMipsAsr = 0x2,
+  kMipsRor = 0x3
+};
+
+// MIPS sync kinds (Note: support for kinds other than kSYNC0 may not exist).
+#define kSYNC0        0x00
+#define kSYNC_WMB     0x04
+#define kSYNC_MB      0x01
+#define kSYNC_ACQUIRE 0x11
+#define kSYNC_RELEASE 0x12
+#define kSYNC_RMB     0x13
+
+// TODO: Use smaller hammer when appropriate for target CPU.
+#define kST kSYNC0
+#define kSY kSYNC0
+
+/*
+ * The following enum defines the list of MIPS instructions supported by the
+ * assembler.  Their corresponding EncodingMap positions are defined in
+ * assemble_mips.cc.
+ */
+enum MipsOpCode {
+  kMipsFirst = 0,
+  kMips32BitData = kMipsFirst, // data [31..0].
+  kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsAddu,  // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+  kMipsAnd,   // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+  kMipsAndi,  // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+  kMipsB,     // b o   [0001000000000000] o[15..0].
+  kMipsBal,   // bal o [0000010000010001] o[15..0].
+  // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
+  //       range may require updates.
+  kMipsBeq,   // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+  kMipsBeqz,  // beqz s,o [000100] s[25..21] [00000] o[15..0].
+  kMipsBgez,  // bgez s,o [000001] s[25..21] [00001] o[15..0].
+  kMipsBgtz,  // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+  kMipsBlez,  // blez s,o [000110] s[25..21] [00000] o[15..0].
+  kMipsBltz,  // bltz s,o [000001] s[25..21] [00000] o[15..0].
+  kMipsBnez,  // bnez s,o [000101] s[25..21] [00000] o[15..0].
+  kMipsBne,   // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+  kMipsDiv,   // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
+#if __mips_isa_rev>=2
+  kMipsExt,   // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+#endif
+  kMipsJal,   // jal t [000011] t[25..0].
+  kMipsJalr,  // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+  kMipsJr,    // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
+  kMipsLahi,  // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+  kMipsLalo,  // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+  kMipsLui,   // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+  kMipsLb,    // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+  kMipsLbu,   // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+  kMipsLh,    // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+  kMipsLhu,   // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+  kMipsLw,    // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+  kMipsMfhi,  // mfhi d [0000000000000000] d[15..11] [00000010000].
+  kMipsMflo,  // mflo d [0000000000000000] d[15..11] [00000010010].
+  kMipsMove,  // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
+  kMipsMovz,  // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
+  kMipsMul,   // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
+  kMipsNop,   // nop [00000000000000000000000000000000].
+  kMipsNor,   // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+  kMipsOr,    // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+  kMipsOri,   // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsPref,  // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
+  kMipsSb,    // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+#if __mips_isa_rev>=2
+  kMipsSeb,   // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+  kMipsSeh,   // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+#endif
+  kMipsSh,    // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+  kMipsSll,   // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+  kMipsSllv,  // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+  kMipsSlt,   // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+  kMipsSlti,  // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+  kMipsSltu,  // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+  kMipsSra,   // sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011].
+  kMipsSrav,  // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+  kMipsSrl,   // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+  kMipsSrlv,  // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+  kMipsSubu,  // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+  kMipsSw,    // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+  kMipsXor,   // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+  kMipsXori,  // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+  kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFcvtsd,// cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtsw,// cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtds,// cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtdw,// cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtws,// cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+  kMipsFcvtwd,// cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+  kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+  kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+  kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+  kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+  kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+  kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+  kMipsMfc1,  // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+  kMipsMtc1,  // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+  kMipsDelta, // Pseudo for ori t, s, <label>-<label>.
+  kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+  kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+  kMipsCurrPC,  // jal to .+8 to materialize pc.
+  kMipsSync,    // sync kind [000000] [0000000000000000] s[10..6] [001111].
+  kMipsUndefined,  // undefined [011001xxxxxxxxxxxxxxxx].
+  kMipsLast
+};
+
+// Instruction assembly field_loc kind.
+enum MipsEncodingKind {
+  kFmtUnused,
+  kFmtBitBlt,    /* Bit string using end/start */
+  kFmtDfp,       /* Double FP reg */
+  kFmtSfp,       /* Single FP reg */
+  kFmtBlt5_2,    /* Same 5-bit field to 2 locations */
+};
+
+// Struct used to define the snippet positions for each MIPS opcode.
+struct MipsEncodingMap {
+  uint32_t skeleton;
+  struct {
+    MipsEncodingKind kind;
+    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
+    int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+  } field_loc[4];
+  MipsOpCode opcode;
+  uint64_t flags;
+  const char *name;
+  const char* fmt;
+  int size;   // Note: size is in bytes.
+};
+
+extern MipsEncodingMap EncodingMap[kMipsLast];
+
+#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_MIPS_MIPSLIR_H_
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
new file mode 100644
index 0000000..cab2c1b
--- /dev/null
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips_lir.h"
+
+#include <string>
+
+namespace art {
+
+static int core_regs[] = {r_ZERO, r_AT, r_V0, r_V1, r_A0, r_A1, r_A2, r_A3,
+                          r_T0, r_T1, r_T2, r_T3, r_T4, r_T5, r_T6, r_T7,
+                          r_S0, r_S1, r_S2, r_S3, r_S4, r_S5, r_S6, r_S7, r_T8,
+                          r_T9, r_K0, r_K1, r_GP, r_SP, r_FP, r_RA};
+static int ReservedRegs[] = {r_ZERO, r_AT, r_S0, r_S1, r_K0, r_K1, r_GP, r_SP,
+                             r_RA};
+static int core_temps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
+                           r_T3, r_T4, r_T5, r_T6, r_T7, r_T8};
+static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
+                       r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
+                         r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+
+RegLocation MipsMir2Lir::LocCReturn()
+{
+  RegLocation res = MIPS_LOC_C_RETURN;
+  return res;
+}
+
+RegLocation MipsMir2Lir::LocCReturnWide()
+{
+  RegLocation res = MIPS_LOC_C_RETURN_WIDE;
+  return res;
+}
+
+RegLocation MipsMir2Lir::LocCReturnFloat()
+{
+  RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
+  return res;
+}
+
+RegLocation MipsMir2Lir::LocCReturnDouble()
+{
+  RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
+  return res;
+}
+
+// Return a target-dependent special register.
+int MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
+  int res = INVALID_REG;
+  switch (reg) {
+    case kSelf: res = rMIPS_SELF; break;
+    case kSuspend: res =  rMIPS_SUSPEND; break;
+    case kLr: res =  rMIPS_LR; break;
+    case kPc: res =  rMIPS_PC; break;
+    case kSp: res =  rMIPS_SP; break;
+    case kArg0: res = rMIPS_ARG0; break;
+    case kArg1: res = rMIPS_ARG1; break;
+    case kArg2: res = rMIPS_ARG2; break;
+    case kArg3: res = rMIPS_ARG3; break;
+    case kFArg0: res = rMIPS_FARG0; break;
+    case kFArg1: res = rMIPS_FARG1; break;
+    case kFArg2: res = rMIPS_FARG2; break;
+    case kFArg3: res = rMIPS_FARG3; break;
+    case kRet0: res = rMIPS_RET0; break;
+    case kRet1: res = rMIPS_RET1; break;
+    case kInvokeTgt: res = rMIPS_INVOKE_TGT; break;
+    case kCount: res = rMIPS_COUNT; break;
+  }
+  return res;
+}
+
+// Create a double from a pair of singles.
+int MipsMir2Lir::S2d(int low_reg, int high_reg)
+{
+  return MIPS_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t MipsMir2Lir::FpRegMask()
+{
+  return MIPS_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool MipsMir2Lir::SameRegType(int reg1, int reg2)
+{
+  return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
+uint64_t MipsMir2Lir::GetRegMaskCommon(int reg)
+{
+  uint64_t seed;
+  int shift;
+  int reg_id;
+
+  reg_id = reg & 0x1f;
+  /* Each double register is equal to a pair of single-precision FP registers */
+  seed = MIPS_DOUBLEREG(reg) ? 3 : 1;
+  /* FP register starts at bit position 16 */
+  shift = MIPS_FPREG(reg) ? kMipsFPReg0 : 0;
+  /* Expand the double register id into single offset */
+  shift += reg_id;
+  return (seed << shift);
+}
+
+uint64_t MipsMir2Lir::GetPCUseDefEncoding()
+{
+  return ENCODE_MIPS_REG_PC;
+}
+
+void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir)
+{
+  DCHECK_EQ(cu_->instruction_set, kMips);
+
+  // Mips-specific resource map setup here.
+  uint64_t flags = MipsMir2Lir::EncodingMap[lir->opcode].flags;
+
+  if (flags & REG_DEF_SP) {
+    lir->def_mask |= ENCODE_MIPS_REG_SP;
+  }
+
+  if (flags & REG_USE_SP) {
+    lir->use_mask |= ENCODE_MIPS_REG_SP;
+  }
+
+  if (flags & REG_DEF_LR) {
+    lir->def_mask |= ENCODE_MIPS_REG_LR;
+  }
+}
+
+/* For dumping instructions */
+#define MIPS_REG_COUNT 32
+static const char *mips_reg_name[MIPS_REG_COUNT] = {
+  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
+/*
+ * Interpret a format string and build a display string for an instruction.
+ * See the format key in assemble_mips.cc.
+ */
+std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
+{
+  std::string buf;
+  int i;
+  const char *fmt_end = &fmt[strlen(fmt)];
+  char tbuf[256];
+  char nc;
+  while (fmt < fmt_end) {
+    int operand;
+    if (*fmt == '!') {
+      fmt++;
+      DCHECK_LT(fmt, fmt_end);
+      nc = *fmt++;
+      if (nc == '!') {
+        strcpy(tbuf, "!");
+      } else {
+         DCHECK_LT(fmt, fmt_end);
+         DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
+         operand = lir->operands[nc-'0'];
+         switch (*fmt++) {
+           case 'b':
+             strcpy(tbuf,"0000");
+             for (i=3; i>= 0; i--) {
+               tbuf[i] += operand & 1;
+               operand >>= 1;
+             }
+             break;
+           case 's':
+             sprintf(tbuf,"$f%d",operand & MIPS_FP_REG_MASK);
+             break;
+           case 'S':
+             DCHECK_EQ(((operand & MIPS_FP_REG_MASK) & 1), 0);
+             sprintf(tbuf,"$f%d",operand & MIPS_FP_REG_MASK);
+             break;
+           case 'h':
+             sprintf(tbuf,"%04x", operand);
+             break;
+           case 'M':
+           case 'd':
+             sprintf(tbuf,"%d", operand);
+             break;
+           case 'D':
+             sprintf(tbuf,"%d", operand+1);
+             break;
+           case 'E':
+             sprintf(tbuf,"%d", operand*4);
+             break;
+           case 'F':
+             sprintf(tbuf,"%d", operand*2);
+             break;
+           case 't':
+             sprintf(tbuf,"0x%08x (L%p)", reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
+                     (operand << 2), lir->target);
+             break;
+           case 'T':
+             sprintf(tbuf,"0x%08x", operand << 2);
+             break;
+           case 'u': {
+             int offset_1 = lir->operands[0];
+             int offset_2 = NEXT_LIR(lir)->operands[0];
+             uintptr_t target =
+                 (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
+                 (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
+             sprintf(tbuf, "%p", reinterpret_cast<void*>(target));
+             break;
+          }
+
+           /* Nothing to print for BLX_2 */
+           case 'v':
+             strcpy(tbuf, "see above");
+             break;
+           case 'r':
+             DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
+             strcpy(tbuf, mips_reg_name[operand]);
+             break;
+           case 'N':
+             // Placeholder for delay slot handling
+             strcpy(tbuf, ";  nop");
+             break;
+           default:
+             strcpy(tbuf,"DecodeError");
+             break;
+         }
+         buf += tbuf;
+      }
+    } else {
+       buf += *fmt++;
+    }
+  }
+  return buf;
+}
+
+// FIXME: need to redo resource maps for MIPS - fix this at that time
+void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
+{
+  char buf[256];
+  buf[0] = 0;
+
+  if (mask == ENCODE_ALL) {
+    strcpy(buf, "all");
+  } else {
+    char num[8];
+    int i;
+
+    for (i = 0; i < kMipsRegEnd; i++) {
+      if (mask & (1ULL << i)) {
+        sprintf(num, "%d ", i);
+        strcat(buf, num);
+      }
+    }
+
+    if (mask & ENCODE_CCODE) {
+      strcat(buf, "cc ");
+    }
+    if (mask & ENCODE_FP_STATUS) {
+      strcat(buf, "fpcc ");
+    }
+    /* Memory bits */
+    if (mips_lir && (mask & ENCODE_DALVIK_REG)) {
+      sprintf(buf + strlen(buf), "dr%d%s", mips_lir->alias_info & 0xffff,
+              (mips_lir->alias_info & 0x80000000) ? "(+1)" : "");
+    }
+    if (mask & ENCODE_LITERAL) {
+      strcat(buf, "lit ");
+    }
+
+    if (mask & ENCODE_HEAP_REF) {
+      strcat(buf, "heap ");
+    }
+    if (mask & ENCODE_MUST_NOT_ALIAS) {
+      strcat(buf, "noalias ");
+    }
+  }
+  if (buf[0]) {
+    LOG(INFO) << prefix << ": " <<  buf;
+  }
+}
+
+/*
+ * TUNING: is this a true leaf?  We can't just use METHOD_IS_LEAF to decide,
+ * as some instructions might call out to C/assembly helper functions.
+ * Until that machinery is in place, always spill ra.
+ */
+
+void MipsMir2Lir::AdjustSpillMask()
+{
+  core_spill_mask_ |= (1 << r_RA);
+  num_core_spills_++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted.  Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask.  Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
+void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg)
+{
+  LOG(FATAL) << "No support yet for promoted FP regs";
+}
+
+void MipsMir2Lir::FlushRegWide(int reg1, int reg2)
+{
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
+  DCHECK(info1 && info2 && info1->pair && info2->pair &&
+         (info1->partner == info2->reg) &&
+         (info2->partner == info1->reg));
+  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+    if (!(info1->is_temp && info2->is_temp)) {
+      /* Should not happen.  If it does, there's a problem in eval_loc */
+      LOG(FATAL) << "Long half-temp, half-promoted";
+    }
+
+    info1->dirty = false;
+    info2->dirty = false;
+    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
+      info1 = info2;
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rMIPS_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+  }
+}
+
+void MipsMir2Lir::FlushReg(int reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  if (info->live && info->dirty) {
+    info->dirty = false;
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rMIPS_SP, VRegOffset(v_reg), reg, kWord);
+  }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool MipsMir2Lir::IsFpReg(int reg) {
+  return MIPS_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void MipsMir2Lir::ClobberCalleeSave()
+{
+  Clobber(r_ZERO);
+  Clobber(r_AT);
+  Clobber(r_V0);
+  Clobber(r_V1);
+  Clobber(r_A0);
+  Clobber(r_A1);
+  Clobber(r_A2);
+  Clobber(r_A3);
+  Clobber(r_T0);
+  Clobber(r_T1);
+  Clobber(r_T2);
+  Clobber(r_T3);
+  Clobber(r_T4);
+  Clobber(r_T5);
+  Clobber(r_T6);
+  Clobber(r_T7);
+  Clobber(r_T8);
+  Clobber(r_T9);
+  Clobber(r_K0);
+  Clobber(r_K1);
+  Clobber(r_GP);
+  Clobber(r_FP);
+  Clobber(r_RA);
+  Clobber(r_F0);
+  Clobber(r_F1);
+  Clobber(r_F2);
+  Clobber(r_F3);
+  Clobber(r_F4);
+  Clobber(r_F5);
+  Clobber(r_F6);
+  Clobber(r_F7);
+  Clobber(r_F8);
+  Clobber(r_F9);
+  Clobber(r_F10);
+  Clobber(r_F11);
+  Clobber(r_F12);
+  Clobber(r_F13);
+  Clobber(r_F14);
+  Clobber(r_F15);
+}
+
+RegLocation MipsMir2Lir::GetReturnWideAlt()
+{
+  UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
+  RegLocation res = LocCReturnWide();
+  return res;
+}
+
+RegLocation MipsMir2Lir::GetReturnAlt()
+{
+  UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
+  RegLocation res = LocCReturn();
+  return res;
+}
+
+MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg)
+{
+  return MIPS_FPREG(reg) ? &reg_pool_->FPRegs[reg & MIPS_FP_REG_MASK]
+            : &reg_pool_->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void MipsMir2Lir::LockCallTemps()
+{
+  LockTemp(rMIPS_ARG0);
+  LockTemp(rMIPS_ARG1);
+  LockTemp(rMIPS_ARG2);
+  LockTemp(rMIPS_ARG3);
+}
+
+/* To be used when explicitly managing register use */
+void MipsMir2Lir::FreeCallTemps()
+{
+  FreeTemp(rMIPS_ARG0);
+  FreeTemp(rMIPS_ARG1);
+  FreeTemp(rMIPS_ARG2);
+  FreeTemp(rMIPS_ARG3);
+}
+
+void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+  NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
+#endif
+}
+
+/*
+ * Alloc a pair of core registers, or a double.  Low reg in low byte,
+ * high reg in next byte.
+ */
+int MipsMir2Lir::AllocTypedTempPair(bool fp_hint,
+                  int reg_class)
+{
+  int high_reg;
+  int low_reg;
+  int res = 0;
+
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    low_reg = AllocTempDouble();
+    high_reg = low_reg + 1;
+    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+    return res;
+  }
+
+  low_reg = AllocTemp();
+  high_reg = AllocTemp();
+  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+  return res;
+}
+
+int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class)
+{
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    return AllocTempFloat();
+  }
+  return AllocTemp();
+}
+
+void MipsMir2Lir::CompilerInitializeRegAlloc()
+{
+  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs = static_cast<RegisterInfo*>
+     (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                     ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs = static_cast<RegisterInfo*>
+      (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                      ArenaAllocator::kAllocRegAlloc));
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
+  // Keep special registers from being allocated
+  for (int i = 0; i < num_reserved; i++) {
+    if (NO_SUSPEND && (ReservedRegs[i] == rMIPS_SUSPEND)) {
+      // To measure cost of suspend check.
+      continue;
+    }
+    MarkInUse(ReservedRegs[i]);
+  }
+  // Mark temp regs - all others not in use can be used for promotion
+  for (int i = 0; i < num_temps; i++) {
+    MarkTemp(core_temps[i]);
+  }
+  for (int i = 0; i < num_fp_temps; i++) {
+    MarkTemp(fp_temps[i]);
+  }
+}
+
+void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free)
+{
+  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+    (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+    // No overlap, free both
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
+  }
+}
+/*
+ * In the Arm code it is typical to use the link register
+ * to hold the target address.  However, for Mips we must
+ * ensure that all branch instructions can be restarted if
+ * there is a trap in the shadow.  Allocate a temp register.
+ */
+int MipsMir2Lir::LoadHelper(int offset)
+{
+  LoadWordDisp(rMIPS_SELF, offset, r_T9);
+  return r_T9;
+}
+
+void MipsMir2Lir::SpillCoreRegs()
+{
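+  // Each set bit N in core_spill_mask_ selects native register N for spilling
+  // into the area just reserved below the incoming sp.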
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  uint32_t mask = core_spill_mask_;
+  int offset = num_core_spills_ * 4;
+  OpRegImm(kOpSub, rMIPS_SP, offset);
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      offset -= 4;
+      StoreWordDisp(rMIPS_SP, offset, reg);
+    }
+  }
+}
+
+void MipsMir2Lir::UnSpillCoreRegs()
+{
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  uint32_t mask = core_spill_mask_;
+  int offset = frame_size_;
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      offset -= 4;
+      LoadWordDisp(rMIPS_SP, offset, reg);
+    }
+  }
+  OpRegImm(kOpAdd, rMIPS_SP, frame_size_);
+}
+
+bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir)
+{
+  return (lir->opcode == kMipsB);
+}
+
+MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
+  for (int i = 0; i < kMipsLast; i++) {
+    if (MipsMir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
+                 << " is wrong: expecting " << i << ", seeing "
+                 << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
+    }
+  }
+}
+
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                           ArenaAllocator* const arena) {
+  return new MipsMir2Lir(cu, mir_graph, arena);
+}
+
+uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode)
+{
+  return MipsMir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* MipsMir2Lir::GetTargetInstName(int opcode)
+{
+  return MipsMir2Lir::EncodingMap[opcode].name;
+}
+
+const char* MipsMir2Lir::GetTargetInstFmt(int opcode)
+{
+  return MipsMir2Lir::EncodingMap[opcode].fmt;
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
new file mode 100644
index 0000000..8daafc8
--- /dev/null
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -0,0 +1,700 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the MIPS32 ISA. */
+LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src)
+{
+  int opcode;
+  /* must be both DOUBLE or both not DOUBLE */
+  DCHECK_EQ(MIPS_DOUBLEREG(r_dest), MIPS_DOUBLEREG(r_src));
+  if (MIPS_DOUBLEREG(r_dest)) {
+    opcode = kMipsFmovd;
+  } else {
+    if (MIPS_SINGLEREG(r_dest)) {
+      if (MIPS_SINGLEREG(r_src)) {
+        opcode = kMipsFmovs;
+      } else {
+        /* note the operands are swapped for the mtc1 instr */
+        int t_opnd = r_src;
+        r_src = r_dest;
+        r_dest = t_opnd;
+        opcode = kMipsMtc1;
+      }
+    } else {
+      DCHECK(MIPS_SINGLEREG(r_src));
+      opcode = kMipsMfc1;
+    }
+  }
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src, r_dest);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+bool MipsMir2Lir::InexpensiveConstantInt(int32_t value)
+{
+  return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
+}
+
+bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value)
+{
+  return false;  // TUNING
+}
+
+bool MipsMir2Lir::InexpensiveConstantLong(int64_t value)
+{
+  return false;  // TUNING
+}
+
+bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value)
+{
+  return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.  If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value)
+{
+  LIR *res;
+
+  int r_dest_save = r_dest;
+  int is_fp_reg = MIPS_FPREG(r_dest);
+  if (is_fp_reg) {
+    DCHECK(MIPS_SINGLEREG(r_dest));
+    r_dest = AllocTemp();
+  }
+
+  /* See if the value can be constructed cheaply */
+  if (value == 0) {
+    res = NewLIR2(kMipsMove, r_dest, r_ZERO);
+  } else if ((value > 0) && (value <= 65535)) {
+    res = NewLIR3(kMipsOri, r_dest, r_ZERO, value);
+  } else if ((value < 0) && (value >= -32768)) {
+    res = NewLIR3(kMipsAddiu, r_dest, r_ZERO, value);
+  } else {
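+    // General case: lui materializes the upper halfword; ori fills in the
+    // lower halfword when it is non-zero.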
+    res = NewLIR2(kMipsLui, r_dest, value>>16);
+    if (value & 0xffff)
+      NewLIR3(kMipsOri, r_dest, r_dest, value);
+  }
+
+  if (is_fp_reg) {
+    NewLIR2(kMipsMtc1, r_dest, r_dest_save);
+    FreeTemp(r_dest);
+  }
+
+  return res;
+}
+
+LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target)
+{
+  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly */);
+  res->target = target;
+  return res;
+}
+
+LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src)
+{
+  MipsOpCode opcode = kMipsNop;
+  switch (op) {
+    case kOpBlx:
+      opcode = kMipsJalr;
+      break;
+    case kOpBx:
+      return NewLIR1(kMipsJr, r_dest_src);
+      break;
+    default:
+      LOG(FATAL) << "Bad case in OpReg";
+  }
+  return NewLIR2(opcode, r_RA, r_dest_src);
+}
+
+LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1,
+          int value)
+{
+  LIR *res;
+  bool neg = (value < 0);
+  int abs_value = (neg) ? -value : value;
+  bool short_form = (abs_value & 0xff) == abs_value;
+  MipsOpCode opcode = kMipsNop;
+  switch (op) {
+    case kOpAdd:
+      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+      break;
+    case kOpSub:
+      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+      break;
+    default:
+      LOG(FATAL) << "Bad case in OpRegImm";
+      break;
+  }
+  if (short_form)
+    res = NewLIR2(opcode, r_dest_src1, abs_value);
+  else {
+    int r_scratch = AllocTemp();
+    res = LoadConstant(r_scratch, value);
+    if (op == kOpCmp)
+      NewLIR2(opcode, r_dest_src1, r_scratch);
+    else
+      NewLIR3(opcode, r_dest_src1, r_dest_src1, r_scratch);
+  }
+  return res;
+}
+
+LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2)
+{
+  MipsOpCode opcode = kMipsNop;
+  switch (op) {
+    case kOpAdd:
+      opcode = kMipsAddu;
+      break;
+    case kOpSub:
+      opcode = kMipsSubu;
+      break;
+    case kOpAnd:
+      opcode = kMipsAnd;
+      break;
+    case kOpMul:
+      opcode = kMipsMul;
+      break;
+    case kOpOr:
+      opcode = kMipsOr;
+      break;
+    case kOpXor:
+      opcode = kMipsXor;
+      break;
+    case kOpLsl:
+      opcode = kMipsSllv;
+      break;
+    case kOpLsr:
+      opcode = kMipsSrlv;
+      break;
+    case kOpAsr:
+      opcode = kMipsSrav;
+      break;
+    case kOpAdc:
+    case kOpSbc:
+      LOG(FATAL) << "No carry bit on MIPS";
+      break;
+    default:
+      LOG(FATAL) << "bad case in OpRegRegReg";
+      break;
+  }
+  return NewLIR3(opcode, r_dest, r_src1, r_src2);
+}
+
+LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value)
+{
+  LIR *res;
+  MipsOpCode opcode = kMipsNop;
+  bool short_form = true;
+
+  switch (op) {
+    case kOpAdd:
+      if (IS_SIMM16(value)) {
+        opcode = kMipsAddiu;
+      }
+      else {
+        short_form = false;
+        opcode = kMipsAddu;
+      }
+      break;
+    case kOpSub:
+      if (IS_SIMM16((-value))) {
+        value = -value;
+        opcode = kMipsAddiu;
+      }
+      else {
+        short_form = false;
+        opcode = kMipsSubu;
+      }
+      break;
+    case kOpLsl:
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMipsSll;
+        break;
+    case kOpLsr:
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMipsSrl;
+        break;
+    case kOpAsr:
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMipsSra;
+        break;
+    case kOpAnd:
+      if (IS_UIMM16((value))) {
+        opcode = kMipsAndi;
+      }
+      else {
+        short_form = false;
+        opcode = kMipsAnd;
+      }
+      break;
+    case kOpOr:
+      if (IS_UIMM16((value))) {
+        opcode = kMipsOri;
+      }
+      else {
+        short_form = false;
+        opcode = kMipsOr;
+      }
+      break;
+    case kOpXor:
+      if (IS_UIMM16((value))) {
+        opcode = kMipsXori;
+      }
+      else {
+        short_form = false;
+        opcode = kMipsXor;
+      }
+      break;
+    case kOpMul:
+      short_form = false;
+      opcode = kMipsMul;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in OpRegRegImm";
+      break;
+  }
+
+  if (short_form)
+    res = NewLIR3(opcode, r_dest, r_src1, value);
+  else {
+    if (r_dest != r_src1) {
+      res = LoadConstant(r_dest, value);
+      NewLIR3(opcode, r_dest, r_src1, r_dest);
+    } else {
+      int r_scratch = AllocTemp();
+      res = LoadConstant(r_scratch, value);
+      NewLIR3(opcode, r_dest, r_src1, r_scratch);
+    }
+  }
+  return res;
+}
+
+LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
+{
+  MipsOpCode opcode = kMipsNop;
+  LIR *res;
+  switch (op) {
+    case kOpMov:
+      opcode = kMipsMove;
+      break;
+    case kOpMvn:
+      return NewLIR3(kMipsNor, r_dest_src1, r_src2, r_ZERO);
+    case kOpNeg:
+      return NewLIR3(kMipsSubu, r_dest_src1, r_ZERO, r_src2);
+    case kOpAdd:
+    case kOpAnd:
+    case kOpMul:
+    case kOpOr:
+    case kOpSub:
+    case kOpXor:
+      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
+    case kOp2Byte:
+#if __mips_isa_rev>=2
+      res = NewLIR2(kMipsSeb, r_dest_src1, r_src2);
+#else
+      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+#endif
+      return res;
+    case kOp2Short:
+#if __mips_isa_rev>=2
+      res = NewLIR2(kMipsSeh, r_dest_src1, r_src2);
+#else
+      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+#endif
+      return res;
+    case kOp2Char:
+       return NewLIR3(kMipsAndi, r_dest_src1, r_src2, 0xFFFF);
+    default:
+      LOG(FATAL) << "Bad case in OpRegReg";
+      break;
+  }
+  return NewLIR2(opcode, r_dest_src1, r_src2);
+}
+
+LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
+{
+  LIR *res;
+  res = LoadConstantNoClobber(r_dest_lo, Low32Bits(value));
+  LoadConstantNoClobber(r_dest_hi, High32Bits(value));
+  return res;
+}
+
+/* Load value from base + scaled index. */
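+/*
+ * Illustrative expansion only (the exact encodings come from the kMips*
+ * opcode table): with a non-zero scale, a core-register word load is
+ * emitted roughly as
+ *   sll   t_reg, r_index, scale
+ *   addu  t_reg, rBase, t_reg
+ *   lw    r_dest, 0(t_reg)
+ * with the sll omitted when scale is zero.
+ */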
+LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
+                                  int scale, OpSize size)
+{
+  LIR *first = NULL;
+  LIR *res;
+  MipsOpCode opcode = kMipsNop;
+  int t_reg = AllocTemp();
+
+  if (MIPS_FPREG(r_dest)) {
+    DCHECK(MIPS_SINGLEREG(r_dest));
+    DCHECK((size == kWord) || (size == kSingle));
+    size = kSingle;
+  } else {
+    if (size == kSingle)
+      size = kWord;
+  }
+
+  if (!scale) {
+    first = NewLIR3(kMipsAddu, t_reg, rBase, r_index);
+  } else {
+    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+    NewLIR3(kMipsAddu, t_reg, rBase, t_reg);
+  }
+
+  switch (size) {
+    case kSingle:
+      opcode = kMipsFlwc1;
+      break;
+    case kWord:
+      opcode = kMipsLw;
+      break;
+    case kUnsignedHalf:
+      opcode = kMipsLhu;
+      break;
+    case kSignedHalf:
+      opcode = kMipsLh;
+      break;
+    case kUnsignedByte:
+      opcode = kMipsLbu;
+      break;
+    case kSignedByte:
+      opcode = kMipsLb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexed";
+  }
+
+  res = NewLIR3(opcode, r_dest, 0, t_reg);
+  FreeTemp(t_reg);
+  return (first) ? first : res;
+}
+
+/* Store value to base + scaled index. */
+LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+                                   int scale, OpSize size)
+{
+  LIR *first = NULL;
+  MipsOpCode opcode = kMipsNop;
+  int t_reg = AllocTemp();
+
+  if (MIPS_FPREG(r_src)) {
+    DCHECK(MIPS_SINGLEREG(r_src));
+    DCHECK((size == kWord) || (size == kSingle));
+    size = kSingle;
+  } else {
+    if (size == kSingle)
+      size = kWord;
+  }
+
+  if (!scale) {
+    first = NewLIR3(kMipsAddu, t_reg, rBase, r_index);
+  } else {
+    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+    NewLIR3(kMipsAddu, t_reg, rBase, t_reg);
+  }
+
+  switch (size) {
+    case kSingle:
+      opcode = kMipsFswc1;
+      break;
+    case kWord:
+      opcode = kMipsSw;
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = kMipsSh;
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kMipsSb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in StoreBaseIndexed";
+  }
+  NewLIR3(opcode, r_src, 0, t_reg);
+  FreeTemp(t_reg);
+  return first;
+}
+
+/*
+ * Load value from base + displacement.  Optionally perform null check
+ * on base (which must have an associated s_reg and MIR).  If not
+ * performing null check, incoming MIR can be null. IMPORTANT: this
+ * code must not allocate any new temps.  If a new register is needed
+ * and base and dest are the same, spill some other register to
+ * rlp and then restore.
+ */
+LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
+                                   int r_dest_hi, OpSize size, int s_reg)
+{
+  LIR *res;
+  LIR *load = NULL;
+  LIR *load2 = NULL;
+  MipsOpCode opcode = kMipsNop;
+  bool short_form = IS_SIMM16(displacement);
+  bool pair = false;
+
+  switch (size) {
+    case kLong:
+    case kDouble:
+      pair = true;
+      opcode = kMipsLw;
+      if (MIPS_FPREG(r_dest)) {
+        opcode = kMipsFlwc1;
+        if (MIPS_DOUBLEREG(r_dest)) {
+          r_dest = r_dest - MIPS_FP_DOUBLE;
+        } else {
+          DCHECK(MIPS_FPREG(r_dest_hi));
+          DCHECK_EQ(r_dest, (r_dest_hi - 1));
+        }
+        r_dest_hi = r_dest + 1;
+      }
+      short_form = IS_SIMM16_2WORD(displacement);
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kWord:
+    case kSingle:
+      opcode = kMipsLw;
+      if (MIPS_FPREG(r_dest)) {
+        opcode = kMipsFlwc1;
+        DCHECK(MIPS_SINGLEREG(r_dest));
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+      opcode = kMipsLhu;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kSignedHalf:
+      opcode = kMipsLh;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+      opcode = kMipsLbu;
+      break;
+    case kSignedByte:
+      opcode = kMipsLb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
+  }
+
+  if (short_form) {
+    if (!pair) {
+      load = res = NewLIR3(opcode, r_dest, displacement, rBase);
+    } else {
+      load = res = NewLIR3(opcode, r_dest,
+                           displacement + LOWORD_OFFSET, rBase);
+      load2 = NewLIR3(opcode, r_dest_hi,
+                      displacement + HIWORD_OFFSET, rBase);
+    }
+  } else {
+    if (pair) {
+      int r_tmp = AllocFreeTemp();
+      res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
+      load = NewLIR3(opcode, r_dest, LOWORD_OFFSET, r_tmp);
+      load2 = NewLIR3(opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
+      FreeTemp(r_tmp);
+    } else {
+      int r_tmp = (rBase == r_dest) ? AllocFreeTemp() : r_dest;
+      res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
+      load = NewLIR3(opcode, r_dest, 0, r_tmp);
+      if (r_tmp != r_dest)
+        FreeTemp(r_tmp);
+    }
+  }
+
+  if (rBase == rMIPS_SP) {
+    AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                            true /* is_load */, pair /* is64bit */);
+    if (pair) {
+      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+                              true /* is_load */, pair /* is64bit */);
+    }
+  }
+  return load;
+}
+
+LIR* MipsMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
+                               OpSize size, int s_reg)
+{
+  return LoadBaseDispBody(rBase, displacement, r_dest, -1,
+                          size, s_reg);
+}
+
+LIR* MipsMir2Lir::LoadBaseDispWide(int rBase, int displacement,
+                                   int r_dest_lo, int r_dest_hi, int s_reg)
+{
+  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement,
+                                    int r_src, int r_src_hi, OpSize size)
+{
+  LIR *res;
+  LIR *store = NULL;
+  LIR *store2 = NULL;
+  MipsOpCode opcode = kMipsNop;
+  bool short_form = IS_SIMM16(displacement);
+  bool pair = false;
+
+  switch (size) {
+    case kLong:
+    case kDouble:
+      pair = true;
+      opcode = kMipsSw;
+      if (MIPS_FPREG(r_src)) {
+        opcode = kMipsFswc1;
+        if (MIPS_DOUBLEREG(r_src)) {
+          r_src = r_src - MIPS_FP_DOUBLE;
+        } else {
+          DCHECK(MIPS_FPREG(r_src_hi));
+          DCHECK_EQ(r_src, (r_src_hi - 1));
+        }
+        r_src_hi = r_src + 1;
+      }
+      short_form = IS_SIMM16_2WORD(displacement);
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kWord:
+    case kSingle:
+      opcode = kMipsSw;
+      if (MIPS_FPREG(r_src)) {
+        opcode = kMipsFswc1;
+        DCHECK(MIPS_SINGLEREG(r_src));
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = kMipsSh;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kMipsSb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in StoreBaseIndexedBody";
+  }
+
+  if (short_form) {
+    if (!pair) {
+      store = res = NewLIR3(opcode, r_src, displacement, rBase);
+    } else {
+      store = res = NewLIR3(opcode, r_src, displacement + LOWORD_OFFSET,
+                            rBase);
+      store2 = NewLIR3(opcode, r_src_hi, displacement + HIWORD_OFFSET,
+                       rBase);
+    }
+  } else {
+    int r_scratch = AllocTemp();
+    res = OpRegRegImm(kOpAdd, r_scratch, rBase, displacement);
+    if (!pair) {
+      store = NewLIR3(opcode, r_src, 0, r_scratch);
+    } else {
+      store = NewLIR3(opcode, r_src, LOWORD_OFFSET, r_scratch);
+      store2 = NewLIR3(opcode, r_src_hi, HIWORD_OFFSET, r_scratch);
+    }
+    FreeTemp(r_scratch);
+  }
+
+  if (rBase == rMIPS_SP) {
+    AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                            false /* is_load */, pair /* is64bit */);
+    if (pair) {
+      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+                              false /* is_load */, pair /* is64bit */);
+    }
+  }
+
+  return res;
+}
+
+LIR* MipsMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
+                                OpSize size)
+{
+  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
+}
+
+LIR* MipsMir2Lir::StoreBaseDispWide(int rBase, int displacement,
+                                    int r_src_lo, int r_src_hi)
+{
+  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
+}
+
+LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset)
+{
+  LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp)
+{
+  LOG(FATAL) << "Unexpected use of OpMem for MIPS";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                       int r_src, int r_src_hi, OpSize size, int s_reg)
+{
+  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
+                           int offset)
+{
+  LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_dest, int r_dest_hi, OpSize size, int s_reg)
+{
+  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
+  return NULL;
+}
+
+LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
+{
+  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
+  return NULL;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
new file mode 100644
index 0000000..4eef264
--- /dev/null
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
+
+#include "mir_to_lir.h"
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+/* Mark a temp register as dead.  Does not affect allocation state. */
+inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
+  if (p->is_temp) {
+    DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
+    p->live = false;
+    p->s_reg = INVALID_SREG;
+    p->def_start = NULL;
+    p->def_end = NULL;
+    if (p->pair) {
+      p->pair = false;
+      Clobber(p->partner);
+    }
+  }
+}
+
+inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
+                            int op1, int op2, int op3, int op4, LIR* target) {
+  LIR* insn = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+  insn->dalvik_offset = dalvik_offset;
+  insn->opcode = opcode;
+  insn->operands[0] = op0;
+  insn->operands[1] = op1;
+  insn->operands[2] = op2;
+  insn->operands[3] = op3;
+  insn->operands[4] = op4;
+  insn->target = target;
+  SetupResourceMasks(insn);
+  if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
+      (opcode == kPseudoExportedPC)) {
+    // Always make labels scheduling barriers
+    insn->use_mask = insn->def_mask = ENCODE_ALL;
+  }
+  return insn;
+}
+
+/*
+ * The following are building blocks to construct low-level IRs with 0 - 5
+ * operands.
+ */
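+/*
+ * A minimal usage sketch (mirroring calls in the MIPS back end above):
+ * emitting a three-operand add is simply
+ *   NewLIR3(kMipsAddu, t_reg, rBase, r_index);
+ * Each helper DCHECKs that the opcode actually takes that many operands and
+ * then appends the freshly built LIR to the instruction stream.
+ */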
+inline LIR* Mir2Lir::NewLIR0(int opcode) {
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode);
+  AppendLIR(insn);
+  return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) {
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
+  AppendLIR(insn);
+  return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) {
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1);
+  AppendLIR(insn);
+  return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2) {
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2);
+  AppendLIR(insn);
+  return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info) {
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info);
+  AppendLIR(insn);
+  return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1,
+                             int info2) {
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info1, info2);
+  AppendLIR(insn);
+  return insn;
+}
+
+/*
+ * Mark the corresponding bit(s).
+ */
+inline void Mir2Lir::SetupRegMask(uint64_t* mask, int reg) {
+  *mask |= GetRegMaskCommon(reg);
+}
+
+/*
+ * Set up the proper fields in the resource mask
+ */
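+/*
+ * For example (the flag combination is illustrative, not any particular
+ * opcode's actual definition): an instruction flagged
+ * IS_LOAD | REG_DEF0 | REG_USE1 gets operands[0] added to def_mask,
+ * operands[1] added to use_mask, and a default heap reference recorded via
+ * SetMemRefType(); anything flagged IS_BRANCH is conservatively treated as
+ * defining and using everything (ENCODE_ALL).
+ */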
+inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
+  int opcode = lir->opcode;
+
+  if (opcode <= 0) {
+    lir->use_mask = lir->def_mask = 0;
+    return;
+  }
+
+  uint64_t flags = GetTargetInstFlags(opcode);
+
+  if (flags & NEEDS_FIXUP) {
+    lir->flags.pcRelFixup = true;
+  }
+
+  /* Get the starting size of the instruction's template */
+  lir->flags.size = GetInsnSize(lir);
+
+  /* Set up the mask for resources that are updated */
+  if (flags & (IS_LOAD | IS_STORE)) {
+    /* Default to heap - will catch specialized classes later */
+    SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
+  }
+
+  /*
+   * Conservatively assume the branch here will call out a function that in
+   * turn will trash everything.
+   */
+  if (flags & IS_BRANCH) {
+    lir->def_mask = lir->use_mask = ENCODE_ALL;
+    return;
+  }
+
+  if (flags & REG_DEF0) {
+    SetupRegMask(&lir->def_mask, lir->operands[0]);
+  }
+
+  if (flags & REG_DEF1) {
+    SetupRegMask(&lir->def_mask, lir->operands[1]);
+  }
+
+  if (flags & SETS_CCODES) {
+    lir->def_mask |= ENCODE_CCODE;
+  }
+
+  if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+    for (int i = 0; i < 4; i++) {
+      if (flags & (1 << (kRegUse0 + i))) {
+        SetupRegMask(&lir->use_mask, lir->operands[i]);
+      }
+    }
+  }
+
+  if (flags & USES_CCODES) {
+    lir->use_mask |= ENCODE_CCODE;
+  }
+
+  // Handle target-specific actions
+  SetupTargetResourceMasks(lir);
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
new file mode 100644
index 0000000..4562482
--- /dev/null
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -0,0 +1,843 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_internals.h"
+#include "dex/dataflow_iterator-inl.h"
+#include "mir_to_lir-inl.h"
+#include "object_utils.h"
+
+namespace art {
+
+/*
+ * Target-independent code generation.  Use only high-level
+ * load/store utilities here, or target-dependent genXX() handlers
+ * when necessary.
+ */
+void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list)
+{
+  RegLocation rl_src[3];
+  RegLocation rl_dest = mir_graph_->GetBadLoc();
+  RegLocation rl_result = mir_graph_->GetBadLoc();
+  Instruction::Code opcode = mir->dalvikInsn.opcode;
+  int opt_flags = mir->optimization_flags;
+  uint32_t vB = mir->dalvikInsn.vB;
+  uint32_t vC = mir->dalvikInsn.vC;
+
+  // Prep Src and Dest locations.
+  int next_sreg = 0;
+  int next_loc = 0;
+  int attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
+  if (attrs & DF_UA) {
+    if (attrs & DF_A_WIDE) {
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
+      next_sreg += 2;
+    } else {
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
+      next_sreg++;
+    }
+  }
+  if (attrs & DF_UB) {
+    if (attrs & DF_B_WIDE) {
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
+      next_sreg += 2;
+    } else {
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
+      next_sreg++;
+    }
+  }
+  if (attrs & DF_UC) {
+    if (attrs & DF_C_WIDE) {
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
+    } else {
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
+    }
+  }
+  if (attrs & DF_DA) {
+    if (attrs & DF_A_WIDE) {
+      rl_dest = mir_graph_->GetDestWide(mir);
+    } else {
+      rl_dest = mir_graph_->GetDest(mir);
+    }
+  }
+  switch (opcode) {
+    case Instruction::NOP:
+      break;
+
+    case Instruction::MOVE_EXCEPTION:
+      GenMoveException(rl_dest);
+      break;
+
+    case Instruction::RETURN_VOID:
+      if (((cu_->access_flags & kAccConstructor) != 0) &&
+          cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
+                                                          cu_->class_def_idx)) {
+        GenMemBarrier(kStoreStore);
+      }
+      if (!mir_graph_->MethodIsLeaf()) {
+        GenSuspendTest(opt_flags);
+      }
+      break;
+
+    case Instruction::RETURN:
+    case Instruction::RETURN_OBJECT:
+      if (!mir_graph_->MethodIsLeaf()) {
+        GenSuspendTest(opt_flags);
+      }
+      StoreValue(GetReturn(cu_->shorty[0] == 'F'), rl_src[0]);
+      break;
+
+    case Instruction::RETURN_WIDE:
+      if (!mir_graph_->MethodIsLeaf()) {
+        GenSuspendTest(opt_flags);
+      }
+      StoreValueWide(GetReturnWide(cu_->shorty[0] == 'D'), rl_src[0]);
+      break;
+
+    case Instruction::MOVE_RESULT_WIDE:
+      if (opt_flags & MIR_INLINED)
+        break;  // Nop - combined w/ previous invoke.
+      StoreValueWide(rl_dest, GetReturnWide(rl_dest.fp));
+      break;
+
+    case Instruction::MOVE_RESULT:
+    case Instruction::MOVE_RESULT_OBJECT:
+      if (opt_flags & MIR_INLINED)
+        break;  // Nop - combined w/ previous invoke.
+      StoreValue(rl_dest, GetReturn(rl_dest.fp));
+      break;
+
+    case Instruction::MOVE:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_OBJECT_16:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_OBJECT_FROM16:
+      StoreValue(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::MOVE_WIDE:
+    case Instruction::MOVE_WIDE_16:
+    case Instruction::MOVE_WIDE_FROM16:
+      StoreValueWide(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantNoClobber(rl_result.low_reg, vB);
+      StoreValue(rl_dest, rl_result);
+      if (vB == 0) {
+        Workaround7250540(rl_dest, rl_result.low_reg);
+      }
+      break;
+
+    case Instruction::CONST_HIGH16:
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantNoClobber(rl_result.low_reg, vB << 16);
+      StoreValue(rl_dest, rl_result);
+      if (vB == 0) {
+        Workaround7250540(rl_dest, rl_result.low_reg);
+      }
+      break;
+
+    case Instruction::CONST_WIDE_16:
+    case Instruction::CONST_WIDE_32:
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
+                           static_cast<int64_t>(static_cast<int32_t>(vB)));
+      StoreValueWide(rl_dest, rl_result);
+      break;
+
+    case Instruction::CONST_WIDE:
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantWide(rl_result.low_reg, rl_result.high_reg, mir->dalvikInsn.vB_wide);
+      StoreValueWide(rl_dest, rl_result);
+      break;
+
+    case Instruction::CONST_WIDE_HIGH16:
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
+                           static_cast<int64_t>(vB) << 48);
+      StoreValueWide(rl_dest, rl_result);
+      break;
+
+    case Instruction::MONITOR_ENTER:
+      GenMonitorEnter(opt_flags, rl_src[0]);
+      break;
+
+    case Instruction::MONITOR_EXIT:
+      GenMonitorExit(opt_flags, rl_src[0]);
+      break;
+
+    case Instruction::CHECK_CAST: {
+      GenCheckCast(mir->offset, vB, rl_src[0]);
+      break;
+    }
+    case Instruction::INSTANCE_OF:
+      GenInstanceof(vC, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::NEW_INSTANCE:
+      GenNewInstance(vB, rl_dest);
+      break;
+
+    case Instruction::THROW:
+      GenThrow(rl_src[0]);
+      break;
+
+    case Instruction::ARRAY_LENGTH:
+      int len_offset;
+      len_offset = mirror::Array::LengthOffset().Int32Value();
+      rl_src[0] = LoadValue(rl_src[0], kCoreReg);
+      GenNullCheck(rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      LoadWordDisp(rl_src[0].low_reg, len_offset, rl_result.low_reg);
+      StoreValue(rl_dest, rl_result);
+      break;
+
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO:
+      GenConstString(vB, rl_dest);
+      break;
+
+    case Instruction::CONST_CLASS:
+      GenConstClass(vB, rl_dest);
+      break;
+
+    case Instruction::FILL_ARRAY_DATA:
+      GenFillArrayData(vB, rl_src[0]);
+      break;
+
+    case Instruction::FILLED_NEW_ARRAY:
+      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
+                        false /* not range */));
+      break;
+
+    case Instruction::FILLED_NEW_ARRAY_RANGE:
+      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
+                        true /* range */));
+      break;
+
+    case Instruction::NEW_ARRAY:
+      GenNewArray(vC, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+      if (bb->taken->start_offset <= mir->offset) {
+        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken->id]);
+      } else {
+        OpUnconditionalBranch(&label_list[bb->taken->id]);
+      }
+      break;
+
+    case Instruction::PACKED_SWITCH:
+      GenPackedSwitch(mir, vB, rl_src[0]);
+      break;
+
+    case Instruction::SPARSE_SWITCH:
+      GenSparseSwitch(mir, vB, rl_src[0]);
+      break;
+
+    case Instruction::CMPL_FLOAT:
+    case Instruction::CMPG_FLOAT:
+    case Instruction::CMPL_DOUBLE:
+    case Instruction::CMPG_DOUBLE:
+      GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::CMP_LONG:
+      GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::IF_EQ:
+    case Instruction::IF_NE:
+    case Instruction::IF_LT:
+    case Instruction::IF_GE:
+    case Instruction::IF_GT:
+    case Instruction::IF_LE: {
+      LIR* taken = &label_list[bb->taken->id];
+      LIR* fall_through = &label_list[bb->fall_through->id];
+      bool backward_branch;
+      backward_branch = (bb->taken->start_offset <= mir->offset);
+      // Result known at compile time?
+      if (rl_src[0].is_const && rl_src[1].is_const) {
+        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
+                                       mir_graph_->ConstantValue(rl_src[1].orig_sreg));
+        if (is_taken && backward_branch) {
+          GenSuspendTest(opt_flags);
+        }
+        int id = is_taken ? bb->taken->id : bb->fall_through->id;
+        OpUnconditionalBranch(&label_list[id]);
+      } else {
+        if (backward_branch) {
+          GenSuspendTest(opt_flags);
+        }
+        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken,
+                                fall_through);
+      }
+      break;
+    }
+
+    case Instruction::IF_EQZ:
+    case Instruction::IF_NEZ:
+    case Instruction::IF_LTZ:
+    case Instruction::IF_GEZ:
+    case Instruction::IF_GTZ:
+    case Instruction::IF_LEZ: {
+      LIR* taken = &label_list[bb->taken->id];
+      LIR* fall_through = &label_list[bb->fall_through->id];
+      bool backward_branch;
+      backward_branch = (bb->taken->start_offset <= mir->offset);
+      // Result known at compile time?
+      if (rl_src[0].is_const) {
+        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
+        if (is_taken && backward_branch) {
+          GenSuspendTest(opt_flags);
+        }
+        int id = is_taken ? bb->taken->id : bb->fall_through->id;
+        OpUnconditionalBranch(&label_list[id]);
+      } else {
+        if (backward_branch) {
+          GenSuspendTest(opt_flags);
+        }
+        GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
+      }
+      break;
+    }
+
+    case Instruction::AGET_WIDE:
+      GenArrayGet(opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
+      break;
+    case Instruction::AGET:
+    case Instruction::AGET_OBJECT:
+      GenArrayGet(opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
+      break;
+    case Instruction::AGET_BOOLEAN:
+      GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+      break;
+    case Instruction::AGET_BYTE:
+      GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+      break;
+    case Instruction::AGET_CHAR:
+      GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+      break;
+    case Instruction::AGET_SHORT:
+      GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+      break;
+    case Instruction::APUT_WIDE:
+      GenArrayPut(opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
+      break;
+    case Instruction::APUT:
+      GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
+      break;
+    case Instruction::APUT_OBJECT:
+      GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
+      break;
+    case Instruction::APUT_SHORT:
+    case Instruction::APUT_CHAR:
+      GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
+      break;
+    case Instruction::APUT_BYTE:
+    case Instruction::APUT_BOOLEAN:
+      GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0);
+      break;
+
+    case Instruction::IGET_OBJECT:
+      GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+      break;
+
+    case Instruction::IGET_WIDE:
+      GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+      break;
+
+    case Instruction::IGET:
+      GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+      break;
+
+    case Instruction::IGET_CHAR:
+      GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+      break;
+
+    case Instruction::IGET_SHORT:
+      GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+      break;
+
+    case Instruction::IGET_BOOLEAN:
+    case Instruction::IGET_BYTE:
+      GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+      break;
+
+    case Instruction::IPUT_WIDE:
+      GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+      break;
+
+    case Instruction::IPUT_OBJECT:
+      GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+      break;
+
+    case Instruction::IPUT:
+      GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+      break;
+
+    case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BYTE:
+      GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+      break;
+
+    case Instruction::IPUT_CHAR:
+      GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+      break;
+
+    case Instruction::IPUT_SHORT:
+      GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+      break;
+
+    case Instruction::SGET_OBJECT:
+      GenSget(vB, rl_dest, false, true);
+      break;
+    case Instruction::SGET:
+    case Instruction::SGET_BOOLEAN:
+    case Instruction::SGET_BYTE:
+    case Instruction::SGET_CHAR:
+    case Instruction::SGET_SHORT:
+      GenSget(vB, rl_dest, false, false);
+      break;
+
+    case Instruction::SGET_WIDE:
+      GenSget(vB, rl_dest, true, false);
+      break;
+
+    case Instruction::SPUT_OBJECT:
+      GenSput(vB, rl_src[0], false, true);
+      break;
+
+    case Instruction::SPUT:
+    case Instruction::SPUT_BOOLEAN:
+    case Instruction::SPUT_BYTE:
+    case Instruction::SPUT_CHAR:
+    case Instruction::SPUT_SHORT:
+      GenSput(vB, rl_src[0], false, false);
+      break;
+
+    case Instruction::SPUT_WIDE:
+      GenSput(vB, rl_src[0], true, false);
+      break;
+
+    case Instruction::INVOKE_STATIC_RANGE:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
+      break;
+    case Instruction::INVOKE_STATIC:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
+      break;
+
+    case Instruction::INVOKE_DIRECT:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
+      break;
+    case Instruction::INVOKE_DIRECT_RANGE:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
+      break;
+
+    case Instruction::INVOKE_VIRTUAL:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
+      break;
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
+      break;
+
+    case Instruction::INVOKE_SUPER:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
+      break;
+    case Instruction::INVOKE_SUPER_RANGE:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
+      break;
+
+    case Instruction::INVOKE_INTERFACE:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
+      break;
+    case Instruction::INVOKE_INTERFACE_RANGE:
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
+      break;
+
+    case Instruction::NEG_INT:
+    case Instruction::NOT_INT:
+      GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]);
+      break;
+
+    case Instruction::NEG_LONG:
+    case Instruction::NOT_LONG:
+      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0]);
+      break;
+
+    case Instruction::NEG_FLOAT:
+      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
+      break;
+
+    case Instruction::NEG_DOUBLE:
+      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
+      break;
+
+    case Instruction::INT_TO_LONG:
+      GenIntToLong(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::LONG_TO_INT:
+      rl_src[0] = UpdateLocWide(rl_src[0]);
+      rl_src[0] = WideToNarrow(rl_src[0]);
+      StoreValue(rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::INT_TO_BYTE:
+    case Instruction::INT_TO_SHORT:
+    case Instruction::INT_TO_CHAR:
+      GenIntNarrowing(opcode, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::INT_TO_FLOAT:
+    case Instruction::INT_TO_DOUBLE:
+    case Instruction::LONG_TO_FLOAT:
+    case Instruction::LONG_TO_DOUBLE:
+    case Instruction::FLOAT_TO_INT:
+    case Instruction::FLOAT_TO_LONG:
+    case Instruction::FLOAT_TO_DOUBLE:
+    case Instruction::DOUBLE_TO_INT:
+    case Instruction::DOUBLE_TO_LONG:
+    case Instruction::DOUBLE_TO_FLOAT:
+      GenConversion(opcode, rl_dest, rl_src[0]);
+      break;
+
+    case Instruction::ADD_INT:
+    case Instruction::ADD_INT_2ADDR:
+    case Instruction::MUL_INT:
+    case Instruction::MUL_INT_2ADDR:
+    case Instruction::AND_INT:
+    case Instruction::AND_INT_2ADDR:
+    case Instruction::OR_INT:
+    case Instruction::OR_INT_2ADDR:
+    case Instruction::XOR_INT:
+    case Instruction::XOR_INT_2ADDR:
+      if (rl_src[0].is_const &&
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]))) {
+        GenArithOpIntLit(opcode, rl_dest, rl_src[1],
+                             mir_graph_->ConstantValue(rl_src[0].orig_sreg));
+      } else if (rl_src[1].is_const &&
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+        GenArithOpIntLit(opcode, rl_dest, rl_src[0],
+                             mir_graph_->ConstantValue(rl_src[1].orig_sreg));
+      } else {
+        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
+      }
+      break;
+
+    case Instruction::SUB_INT:
+    case Instruction::SUB_INT_2ADDR:
+    case Instruction::DIV_INT:
+    case Instruction::DIV_INT_2ADDR:
+    case Instruction::REM_INT:
+    case Instruction::REM_INT_2ADDR:
+    case Instruction::SHL_INT:
+    case Instruction::SHL_INT_2ADDR:
+    case Instruction::SHR_INT:
+    case Instruction::SHR_INT_2ADDR:
+    case Instruction::USHR_INT:
+    case Instruction::USHR_INT_2ADDR:
+      if (rl_src[1].is_const &&
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+        GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
+      } else {
+        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
+      }
+      break;
+
+    case Instruction::ADD_LONG:
+    case Instruction::SUB_LONG:
+    case Instruction::AND_LONG:
+    case Instruction::OR_LONG:
+    case Instruction::XOR_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+    case Instruction::SUB_LONG_2ADDR:
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::OR_LONG_2ADDR:
+    case Instruction::XOR_LONG_2ADDR:
+      if (rl_src[0].is_const || rl_src[1].is_const) {
+        GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+        break;
+      }
+      // Note: intentional fallthrough.
+
+    case Instruction::MUL_LONG:
+    case Instruction::DIV_LONG:
+    case Instruction::REM_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+    case Instruction::DIV_LONG_2ADDR:
+    case Instruction::REM_LONG_2ADDR:
+      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::SHL_LONG:
+    case Instruction::SHR_LONG:
+    case Instruction::USHR_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+    case Instruction::SHR_LONG_2ADDR:
+    case Instruction::USHR_LONG_2ADDR:
+      if (rl_src[1].is_const) {
+        GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+      } else {
+        GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+      }
+      break;
+
+    case Instruction::ADD_FLOAT:
+    case Instruction::SUB_FLOAT:
+    case Instruction::MUL_FLOAT:
+    case Instruction::DIV_FLOAT:
+    case Instruction::REM_FLOAT:
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT_2ADDR:
+      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::ADD_DOUBLE:
+    case Instruction::SUB_DOUBLE:
+    case Instruction::MUL_DOUBLE:
+    case Instruction::DIV_DOUBLE:
+    case Instruction::REM_DOUBLE:
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE_2ADDR:
+      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
+      break;
+
+    case Instruction::RSUB_INT:
+    case Instruction::ADD_INT_LIT16:
+    case Instruction::MUL_INT_LIT16:
+    case Instruction::DIV_INT_LIT16:
+    case Instruction::REM_INT_LIT16:
+    case Instruction::AND_INT_LIT16:
+    case Instruction::OR_INT_LIT16:
+    case Instruction::XOR_INT_LIT16:
+    case Instruction::ADD_INT_LIT8:
+    case Instruction::RSUB_INT_LIT8:
+    case Instruction::MUL_INT_LIT8:
+    case Instruction::DIV_INT_LIT8:
+    case Instruction::REM_INT_LIT8:
+    case Instruction::AND_INT_LIT8:
+    case Instruction::OR_INT_LIT8:
+    case Instruction::XOR_INT_LIT8:
+    case Instruction::SHL_INT_LIT8:
+    case Instruction::SHR_INT_LIT8:
+    case Instruction::USHR_INT_LIT8:
+      GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+}
+
+// Process extended MIR instructions
+void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir)
+{
+  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
+    case kMirOpCopy: {
+      RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
+      RegLocation rl_dest = mir_graph_->GetDest(mir);
+      StoreValue(rl_dest, rl_src);
+      break;
+    }
+    case kMirOpFusedCmplFloat:
+      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
+      break;
+    case kMirOpFusedCmpgFloat:
+      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
+      break;
+    case kMirOpFusedCmplDouble:
+      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
+      break;
+    case kMirOpFusedCmpgDouble:
+      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
+      break;
+    case kMirOpFusedCmpLong:
+      GenFusedLongCmpBranch(bb, mir);
+      break;
+    case kMirOpSelect:
+      GenSelect(bb, mir);
+      break;
+    default:
+      break;
+  }
+}
+
+// Handle the content in each basic block.
+bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb)
+{
+  if (bb->block_type == kDead) return false;
+  current_dalvik_offset_ = bb->start_offset;
+  MIR* mir;
+  int block_id = bb->id;
+
+  block_label_list_[block_id].operands[0] = bb->start_offset;
+
+  // Insert the block label.
+  block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
+  AppendLIR(&block_label_list_[block_id]);
+
+  LIR* head_lir = NULL;
+
+  // If this is a catch block, export the start address.
+  if (bb->catch_entry) {
+    head_lir = NewLIR0(kPseudoExportedPC);
+  }
+
+  // Free temp registers and reset redundant store tracking.
+  ResetRegPool();
+  ResetDefTracking();
+
+  ClobberAllRegs();
+
+  if (bb->block_type == kEntryBlock) {
+    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+    GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
+                         mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
+  } else if (bb->block_type == kExitBlock) {
+    GenExitSequence();
+  }
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    ResetRegPool();
+    if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
+      ClobberAllRegs();
+    }
+
+    if (cu_->disable_opt & (1 << kSuppressLoads)) {
+      ResetDefTracking();
+    }
+
+    // Reset temp tracking sanity check.
+    if (kIsDebugBuild) {
+      live_sreg_ = INVALID_SREG;
+    }
+
+    current_dalvik_offset_ = mir->offset;
+    int opcode = mir->dalvikInsn.opcode;
+    LIR* boundary_lir;
+
+    // Mark the beginning of a Dalvik instruction for line tracking.
+    char* inst_str = cu_->verbose ?
+       mir_graph_->GetDalvikDisassembly(mir) : NULL;
+    boundary_lir = MarkBoundary(mir->offset, inst_str);
+    // Remember the first LIR for this block.
+    if (head_lir == NULL) {
+      head_lir = boundary_lir;
+      // Set the first boundary_lir as a scheduling barrier.
+      head_lir->def_mask = ENCODE_ALL;
+    }
+
+    if (opcode == kMirOpCheck) {
+      // Combine check and work halves of throwing instruction.
+      MIR* work_half = mir->meta.throw_insn;
+      mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
+      opcode = work_half->dalvikInsn.opcode;
+      SSARepresentation* ssa_rep = work_half->ssa_rep;
+      work_half->ssa_rep = mir->ssa_rep;
+      mir->ssa_rep = ssa_rep;
+      work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheckPart2);
+    }
+
+    if (opcode >= kMirOpFirst) {
+      HandleExtendedMethodMIR(bb, mir);
+      continue;
+    }
+
+    CompileDalvikInstruction(mir, bb, block_label_list_);
+  }
+
+  if (head_lir) {
+    // Eliminate redundant loads/stores and delay stores into later slots.
+    ApplyLocalOptimizations(head_lir, last_lir_insn_);
+
+    // Generate an unconditional branch to the fallthrough block.
+    if (bb->fall_through) {
+      OpUnconditionalBranch(&block_label_list_[bb->fall_through->id]);
+    }
+  }
+  return false;
+}
+
+void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case)
+{
+  // Find the first DalvikByteCode block.
+  int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
+  BasicBlock* bb = NULL;
+  for (int idx = 0; idx < num_reachable_blocks; idx++) {
+    // TODO: no direct access of growable lists.
+    int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
+    bb = mir_graph_->GetBasicBlock(dfs_index);
+    if (bb->block_type == kDalvikByteCode) {
+      break;
+    }
+  }
+  if (bb == NULL) {
+    return;
+  }
+  DCHECK_EQ(bb->start_offset, 0);
+  DCHECK(bb->first_mir_insn != NULL);
+
+  // Get the first instruction.
+  MIR* mir = bb->first_mir_insn;
+
+  // Free temp registers and reset redundant store tracking.
+  ResetRegPool();
+  ResetDefTracking();
+  ClobberAllRegs();
+
+  GenSpecialCase(bb, mir, special_case);
+}
+
+void Mir2Lir::MethodMIR2LIR()
+{
+  // Hold the labels of each block.
+  block_label_list_ =
+      static_cast<LIR*>(arena_->NewMem(sizeof(LIR) * mir_graph_->GetNumBlocks(), true,
+                                       ArenaAllocator::kAllocLIR));
+
+  PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    MethodBlockCodeGen(bb);
+  }
+
+  HandleSuspendLaunchPads();
+
+  HandleThrowLaunchPads();
+
+  HandleIntrinsicLaunchPads();
+
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations))) {
+    RemoveRedundantBranches();
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
new file mode 100644
index 0000000..47514f7
--- /dev/null
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -0,0 +1,779 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
+
+#include "invoke_type.h"
+#include "compiled_method.h"
+#include "dex/compiler_enums.h"
+#include "dex/compiler_ir.h"
+#include "dex/backend.h"
+#include "dex/growable_array.h"
+#include "dex/arena_allocator.h"
+#include "driver/compiler_driver.h"
+#include "safe_map.h"
+
+namespace art {
+
+// Set to 1 to measure cost of suspend check.
+#define NO_SUSPEND 0
+
+#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
+#define IS_BRANCH            (1ULL << kIsBranch)
+#define IS_IT                (1ULL << kIsIT)
+#define IS_LOAD              (1ULL << kMemLoad)
+#define IS_QUAD_OP           (1ULL << kIsQuadOp)
+#define IS_QUIN_OP           (1ULL << kIsQuinOp)
+#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
+#define IS_STORE             (1ULL << kMemStore)
+#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
+#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
+#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
+#define NO_OPERAND           (1ULL << kNoOperand)
+#define REG_DEF0             (1ULL << kRegDef0)
+#define REG_DEF1             (1ULL << kRegDef1)
+#define REG_DEFA             (1ULL << kRegDefA)
+#define REG_DEFD             (1ULL << kRegDefD)
+#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
+#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
+#define REG_DEF_LIST0        (1ULL << kRegDefList0)
+#define REG_DEF_LIST1        (1ULL << kRegDefList1)
+#define REG_DEF_LR           (1ULL << kRegDefLR)
+#define REG_DEF_SP           (1ULL << kRegDefSP)
+#define REG_USE0             (1ULL << kRegUse0)
+#define REG_USE1             (1ULL << kRegUse1)
+#define REG_USE2             (1ULL << kRegUse2)
+#define REG_USE3             (1ULL << kRegUse3)
+#define REG_USE4             (1ULL << kRegUse4)
+#define REG_USEA             (1ULL << kRegUseA)
+#define REG_USEC             (1ULL << kRegUseC)
+#define REG_USED             (1ULL << kRegUseD)
+#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
+#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
+#define REG_USE_LIST0        (1ULL << kRegUseList0)
+#define REG_USE_LIST1        (1ULL << kRegUseList1)
+#define REG_USE_LR           (1ULL << kRegUseLR)
+#define REG_USE_PC           (1ULL << kRegUsePC)
+#define REG_USE_SP           (1ULL << kRegUseSP)
+#define SETS_CCODES          (1ULL << kSetsCCodes)
+#define USES_CCODES          (1ULL << kUsesCCodes)
+
+// Common combo register usage patterns.
+#define REG_DEF01            (REG_DEF0 | REG_DEF1)
+#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
+#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
+#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
+#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
+#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
+#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
+#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
+#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
+#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
+#define REG_USE012           (REG_USE01 | REG_USE2)
+#define REG_USE014           (REG_USE01 | REG_USE4)
+#define REG_USE01            (REG_USE0 | REG_USE1)
+#define REG_USE02            (REG_USE0 | REG_USE2)
+#define REG_USE12            (REG_USE1 | REG_USE2)
+#define REG_USE23            (REG_USE2 | REG_USE3)
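+// Reading the combos: REG_DEF0_USE12, for instance, expands to
+// REG_DEF0 | REG_USE1 | REG_USE2, i.e. operand 0 is written while operands
+// 1 and 2 are read (the usual shape of a three-operand ALU instruction).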
+
+struct BasicBlock;
+struct CallInfo;
+struct CompilationUnit;
+struct MIR;
+struct RegLocation;
+struct RegisterInfo;
+class MIRGraph;
+class Mir2Lir;
+
+typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
+                            const MethodReference& target_method,
+                            uint32_t method_idx, uintptr_t direct_code,
+                            uintptr_t direct_method, InvokeType type);
+
+typedef std::vector<uint8_t> CodeBuffer;
+
+struct LIR {
+  int offset;               // Offset of this instruction.
+  int dalvik_offset;        // Offset of Dalvik opcode.
+  LIR* next;
+  LIR* prev;
+  LIR* target;
+  int opcode;
+  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
+  struct {
+    bool is_nop:1;          // LIR is optimized away.
+    bool pcRelFixup:1;      // May need pc-relative fixup.
+    unsigned int size:5;    // Note: size is in bytes.
+    unsigned int unused:25;
+  } flags;
+  int alias_info;           // For Dalvik register & litpool disambiguation.
+  uint64_t use_mask;        // Resource mask for use.
+  uint64_t def_mask;        // Resource mask for def.
+};
+
+// Target-specific initialization.
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
+
+// Utility macros to traverse the LIR list.
+#define NEXT_LIR(lir) (lir->next)
+#define PREV_LIR(lir) (lir->prev)
+
+// Defines for alias_info (tracks Dalvik register references).
+#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
+#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x80000000)
+#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
+#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
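+// Worked example: ENCODE_ALIAS_INFO(5, true) yields 0x80000005, from which
+// DECODE_ALIAS_INFO_REG() recovers register 5 and DECODE_ALIAS_INFO_WIDE()
+// reports 1.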
+
+// Common resource macros.
+#define ENCODE_CCODE            (1ULL << kCCode)
+#define ENCODE_FP_STATUS        (1ULL << kFPStatus)
+
+// Abstract memory locations.
+#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
+#define ENCODE_LITERAL          (1ULL << kLiteral)
+#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
+#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)
+
+#define ENCODE_ALL              (~0ULL)
+#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
+                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+// TODO: replace these macros
+#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
+#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
+#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
+#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
+#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))
+#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
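+// Pseudo opcodes (kPseudoTargetLabel, kPseudoSafepointPC, etc.) are encoded
+// as negative values, so this test cheaply separates them from real target
+// instructions; SetupResourceMasks() relies on the same convention when it
+// clears the masks for opcode <= 0.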
+
+class Mir2Lir : public Backend {
+
+  public:
+    struct SwitchTable {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int vaddr;                  // Dalvik offset of switch opcode.
+      LIR* anchor;                // Reference instruction for relative offsets.
+      LIR** targets;              // Array of case targets.
+    };
+
+    struct FillArrayData {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int size;
+      int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
+    };
+
+    /* Static register use counts */
+    struct RefCounts {
+      int count;
+      int s_reg;
+      bool double_start;   // Starting v_reg for a double
+    };
+
+    /*
+     * Data structure tracking the mapping between a Dalvik register (pair) and a
+     * native register (pair). The idea is to reuse the previously loaded value
+     * if possible, otherwise to keep the value in a native register as long as
+     * possible.
+     */
+    struct RegisterInfo {
+      int reg;                    // Reg number
+      bool in_use;                // Has it been allocated?
+      bool is_temp;               // Can allocate as temp?
+      bool pair;                  // Part of a register pair?
+      int partner;                // If pair, other reg of pair.
+      bool live;                  // Is there an associated SSA name?
+      bool dirty;                 // If live, is it dirty?
+      int s_reg;                  // Name of live value.
+      LIR *def_start;             // Starting inst in last def sequence.
+      LIR *def_end;               // Ending inst in last def sequence.
+    };
+
+    struct RegisterPool {
+      int num_core_regs;
+      RegisterInfo *core_regs;
+      int next_core_reg;
+      int num_fp_regs;
+      RegisterInfo *FPRegs;
+      int next_fp_reg;
+    };
+
+    struct PromotionMap {
+      RegLocationType core_location:3;
+      uint8_t core_reg;
+      RegLocationType fp_location:3;
+      uint8_t FpReg;
+      bool first_in_pair;
+    };
+
+    virtual ~Mir2Lir() {}
+
+    int32_t s4FromSwitchData(const void* switch_data) {
+      return *reinterpret_cast<const int32_t*>(switch_data);
+    }
+
+    RegisterClass oat_reg_class_by_size(OpSize size) {
+      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
+              size == kSignedByte) ? kCoreReg : kAnyReg;
+    }
+
+    size_t CodeBufferSizeInBytes() {
+      return code_buffer_.size() / sizeof(code_buffer_[0]);
+    }
+
+    // Shared by all targets - implemented in codegen_util.cc
+    void AppendLIR(LIR* lir);
+    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
+    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
+
+    int ComputeFrameSize();
+    virtual void Materialize();
+    virtual CompiledMethod* GetCompiledMethod();
+    void MarkSafepointPC(LIR* inst);
+    bool FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
+    void SetupResourceMasks(LIR* lir);
+    void AssembleLIR();
+    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
+    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
+    void SetupRegMask(uint64_t* mask, int reg);
+    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
+    void DumpPromotionMap();
+    void CodegenDump();
+    LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+    LIR* NewLIR0(int opcode);
+    LIR* NewLIR1(int opcode, int dest);
+    LIR* NewLIR2(int opcode, int dest, int src1);
+    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
+    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
+    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
+    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
+    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
+    LIR* AddWordData(LIR* *constant_list_p, int value);
+    LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
+    void ProcessSwitchTables();
+    void DumpSparseSwitchTable(const uint16_t* table);
+    void DumpPackedSwitchTable(const uint16_t* table);
+    LIR* MarkBoundary(int offset, const char* inst_str);
+    void NopLIR(LIR* lir);
+    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
+    bool IsInexpensiveConstant(RegLocation rl_src);
+    ConditionCode FlipComparisonOrder(ConditionCode before);
+    void DumpMappingTable(const char* table_name, const std::string& descriptor,
+                          const std::string& name, const std::string& signature,
+                          const std::vector<uint32_t>& v);
+    void InstallLiteralPools();
+    void InstallSwitchTables();
+    void InstallFillArrayData();
+    bool VerifyCatchEntries();
+    void CreateMappingTables();
+    void CreateNativeGcMap();
+    int AssignLiteralOffset(int offset);
+    int AssignSwitchTablesOffset(int offset);
+    int AssignFillArrayDataOffset(int offset);
+    int AssignInsnOffsets();
+    void AssignOffsets();
+    LIR* InsertCaseLabel(int vaddr, int keyVal);
+    void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+    void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+
+    // Shared by all targets - implemented in local_optimizations.cc
+    void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
+    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
+    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
+    void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
+    void RemoveRedundantBranches();
+
+    // Shared by all targets - implemented in ralloc_util.cc
+    int GetSRegHi(int lowSreg);
+    bool oat_live_out(int s_reg);
+    int oatSSASrc(MIR* mir, int num);
+    void SimpleRegAlloc();
+    void ResetRegPool();
+    void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
+    void DumpRegPool(RegisterInfo* p, int num_regs);
+    void DumpCoreRegPool();
+    void DumpFpRegPool();
+    /* Mark a temp register as dead.  Does not affect allocation state. */
+    void Clobber(int reg) {
+      ClobberBody(GetRegInfo(reg));
+    }
+    void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
+    void ClobberSReg(int s_reg);
+    int SRegToPMap(int s_reg);
+    void RecordCorePromotion(int reg, int s_reg);
+    int AllocPreservedCoreReg(int s_reg);
+    void RecordFpPromotion(int reg, int s_reg);
+    int AllocPreservedSingle(int s_reg, bool even);
+    int AllocPreservedDouble(int s_reg);
+    int AllocPreservedFPReg(int s_reg, bool double_start);
+    int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
+                      bool required);
+    int AllocTempDouble();
+    int AllocFreeTemp();
+    int AllocTemp();
+    int AllocTempFloat();
+    RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
+    RegisterInfo* AllocLive(int s_reg, int reg_class);
+    void FreeTemp(int reg);
+    RegisterInfo* IsLive(int reg);
+    RegisterInfo* IsTemp(int reg);
+    RegisterInfo* IsPromoted(int reg);
+    bool IsDirty(int reg);
+    void LockTemp(int reg);
+    void ResetDef(int reg);
+    void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
+    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
+    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
+    RegLocation WideToNarrow(RegLocation rl);
+    void ResetDefLoc(RegLocation rl);
+    void ResetDefLocWide(RegLocation rl);
+    void ResetDefTracking();
+    void ClobberAllRegs();
+    void FlushAllRegsBody(RegisterInfo* info, int num_regs);
+    void FlushAllRegs();
+    bool RegClassMatches(int reg_class, int reg);
+    void MarkLive(int reg, int s_reg);
+    void MarkTemp(int reg);
+    void UnmarkTemp(int reg);
+    void MarkPair(int low_reg, int high_reg);
+    void MarkClean(RegLocation loc);
+    void MarkDirty(RegLocation loc);
+    void MarkInUse(int reg);
+    void CopyRegInfo(int new_reg, int old_reg);
+    bool CheckCorePoolSanity();
+    RegLocation UpdateLoc(RegLocation loc);
+    RegLocation UpdateLocWide(RegLocation loc);
+    RegLocation UpdateRawLoc(RegLocation loc);
+    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
+    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
+    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts);
+    void DumpCounts(const RefCounts* arr, int size, const char* msg);
+    void DoPromotion();
+    int VRegOffset(int v_reg);
+    int SRegOffset(int s_reg);
+    RegLocation GetReturnWide(bool is_double);
+    RegLocation GetReturn(bool is_float);
+
+    // Shared by all targets - implemented in gen_common.cc.
+    bool HandleEasyDivide(Instruction::Code dalvik_opcode,
+                          RegLocation rl_src, RegLocation rl_dest, int lit);
+    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
+    void HandleSuspendLaunchPads();
+    void HandleIntrinsicLaunchPads();
+    void HandleThrowLaunchPads();
+    void GenBarrier();
+    LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
+    LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
+                       ThrowKind kind);
+    LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
+    LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
+                        ThrowKind kind);
+    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
+                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
+    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
+                                 LIR* taken, LIR* fall_through);
+    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
+                         RegLocation rl_src);
+    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
+                     RegLocation rl_src);
+    void GenFilledNewArray(CallInfo* info);
+    void GenSput(uint32_t field_idx, RegLocation rl_src,
+                 bool is_long_or_double, bool is_object);
+    void GenSget(uint32_t field_idx, RegLocation rl_dest,
+                 bool is_long_or_double, bool is_object);
+    void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
+    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
+    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
+    void GenThrow(RegLocation rl_src);
+    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
+                       RegLocation rl_src);
+    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx,
+                      RegLocation rl_src);
+    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
+                      RegLocation rl_src1, RegLocation rl_src2);
+    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                        RegLocation rl_src1, RegLocation rl_shift);
+    void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
+                       RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src, int lit);
+    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                        RegLocation rl_src1, RegLocation rl_src2);
+    void GenConversionCall(int func_offset, RegLocation rl_dest,
+                           RegLocation rl_src);
+    void GenSuspendTest(int opt_flags);
+    void GenSuspendTestAndBranch(int opt_flags, LIR* target);
+
+    // Shared by all targets - implemented in gen_invoke.cc.
+    int CallHelperSetup(int helper_offset);
+    LIR* CallHelper(int r_tgt, int helper_offset, bool safepoint_pc);
+    void CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0,
+                                       bool safepoint_pc);
+    void CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+                                         RegLocation arg1, bool safepoint_pc);
+    void CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0,
+                                         int arg1, bool safepoint_pc);
+    void CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperImmMethod(int helper_offset, int arg0,
+                                    bool safepoint_pc);
+    void CallRuntimeHelperRegLocationRegLocation(int helper_offset,
+                                                 RegLocation arg0, RegLocation arg1,
+                                                 bool safepoint_pc);
+    void CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+                                    int arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodRegLocation(int helper_offset, int arg0,
+                                               RegLocation arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, int arg2,
+                                       bool safepoint_pc);
+    void CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+                                                    int arg0, RegLocation arg1, RegLocation arg2,
+                                                    bool safepoint_pc);
+    void GenInvoke(CallInfo* info);
+    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
+    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
+                             NextCallInsn next_call_insn,
+                             const MethodReference& target_method,
+                             uint32_t vtable_idx,
+                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                             bool skip_this);
+    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+                           NextCallInsn next_call_insn,
+                           const MethodReference& target_method,
+                           uint32_t vtable_idx,
+                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                           bool skip_this);
+    RegLocation InlineTarget(CallInfo* info);
+    RegLocation InlineTargetWide(CallInfo* info);
+
+    bool GenInlinedCharAt(CallInfo* info);
+    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
+    bool GenInlinedAbsInt(CallInfo* info);
+    bool GenInlinedAbsLong(CallInfo* info);
+    bool GenInlinedFloatCvt(CallInfo* info);
+    bool GenInlinedDoubleCvt(CallInfo* info);
+    bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
+    bool GenInlinedStringCompareTo(CallInfo* info);
+    bool GenInlinedCurrentThread(CallInfo* info);
+    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
+    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
+                             bool is_volatile, bool is_ordered);
+    bool GenIntrinsic(CallInfo* info);
+    int LoadArgRegs(CallInfo* info, int call_state,
+                    NextCallInsn next_call_insn,
+                    const MethodReference& target_method,
+                    uint32_t vtable_idx,
+                    uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                    bool skip_this);
+
+    // Shared by all targets - implemented in gen_loadstore.cc.
+    RegLocation LoadCurrMethod();
+    void LoadCurrMethodDirect(int r_tgt);
+    LIR* LoadConstant(int r_dest, int value);
+    LIR* LoadWordDisp(int rBase, int displacement, int r_dest);
+    RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
+    RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
+    void LoadValueDirect(RegLocation rl_src, int r_dest);
+    void LoadValueDirectFixed(RegLocation rl_src, int r_dest);
+    void LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi);
+    void LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, int reg_hi);
+    LIR* StoreWordDisp(int rBase, int displacement, int r_src);
+    void StoreValue(RegLocation rl_dest, RegLocation rl_src);
+    void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);
+
+    // Shared by all targets - implemented in mir_to_lir.cc.
+    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
+    void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
+    bool MethodBlockCodeGen(BasicBlock* bb);
+    void SpecialMIR2LIR(SpecialCaseHandler special_case);
+    void MethodMIR2LIR();
+
+
+
+    // Required for target - codegen helpers.
+    virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+    virtual int LoadHelper(int offset) = 0;
+    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
+    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg) = 0;
+    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size) = 0;
+    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg) = 0;
+    virtual LIR* LoadConstantNoClobber(int r_dest, int value) = 0;
+    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) = 0;
+    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size) = 0;
+    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi) = 0;
+    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size) = 0;
+    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg) = 0;
+    virtual void MarkGCCard(int val_reg, int tgt_addr_reg) = 0;
+
+    // Required for target - register utilities.
+    virtual bool IsFpReg(int reg) = 0;
+    virtual bool SameRegType(int reg1, int reg2) = 0;
+    virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
+    virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
+    virtual int S2d(int low_reg, int high_reg) = 0;
+    virtual int TargetReg(SpecialTargetRegister reg) = 0;
+    virtual RegisterInfo* GetRegInfo(int reg) = 0;
+    virtual RegLocation GetReturnAlt() = 0;
+    virtual RegLocation GetReturnWideAlt() = 0;
+    virtual RegLocation LocCReturn() = 0;
+    virtual RegLocation LocCReturnDouble() = 0;
+    virtual RegLocation LocCReturnFloat() = 0;
+    virtual RegLocation LocCReturnWide() = 0;
+    virtual uint32_t FpRegMask() = 0;
+    virtual uint64_t GetRegMaskCommon(int reg) = 0;
+    virtual void AdjustSpillMask() = 0;
+    virtual void ClobberCalleeSave() = 0;
+    virtual void FlushReg(int reg) = 0;
+    virtual void FlushRegWide(int reg1, int reg2) = 0;
+    virtual void FreeCallTemps() = 0;
+    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
+    virtual void LockCallTemps() = 0;
+    virtual void MarkPreservedSingle(int v_reg, int reg) = 0;
+    virtual void CompilerInitializeRegAlloc() = 0;
+
+    // Required for target - miscellaneous.
+    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr) = 0;
+    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
+    virtual void SetupTargetResourceMasks(LIR* lir) = 0;
+    virtual const char* GetTargetInstFmt(int opcode) = 0;
+    virtual const char* GetTargetInstName(int opcode) = 0;
+    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
+    virtual uint64_t GetPCUseDefEncoding() = 0;
+    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
+    virtual int GetInsnSize(LIR* lir) = 0;
+    virtual bool IsUnconditionalBranch(LIR* lir) = 0;
+
+    // Required for target - Dalvik-level generators.
+    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenArithOpDouble(Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1,
+                                  RegLocation rl_src2) = 0;
+    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src) = 0;
+    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier) = 0;
+    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0;
+    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
+    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2) = 0;
+    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base,
+                                int offset, ThrowKind kind) = 0;
+    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi,
+                                  bool is_div) = 0;
+    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit,
+                                     bool is_div) = 0;
+    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenDivZeroCheck(int reg_lo, int reg_hi) = 0;
+    virtual void GenEntrySequence(RegLocation* ArgLocs,
+                                  RegLocation rl_method) = 0;
+    virtual void GenExitSequence() = 0;
+    virtual void GenFillArrayData(uint32_t table_offset,
+                                  RegLocation rl_src) = 0;
+    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double) = 0;
+    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
+    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
+    virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
+    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src) = 0;
+    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src) = 0;
+    virtual void GenMoveException(RegLocation rl_dest) = 0;
+    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+                                               RegLocation rl_result, int lit, int first_bit,
+                                               int second_bit) = 0;
+    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src) = 0;
+    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src) = 0;
+    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case) = 0;
+    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                                RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
+    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                     RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+    virtual void GenShiftImmOpLong(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1,
+                                   RegLocation rl_shift) = 0;
+
+    // Required for target - single operation generators.
+    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
+    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
+                             LIR* target) = 0;
+    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
+                                LIR* target) = 0;
+    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
+    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
+                                LIR* target) = 0;
+    virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
+    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
+    virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
+    virtual LIR* OpPcRelLoad(int reg, LIR* target) = 0;
+    virtual LIR* OpReg(OpKind op, int r_dest_src) = 0;
+    virtual LIR* OpRegCopy(int r_dest, int r_src) = 0;
+    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src) = 0;
+    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value) = 0;
+    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
+    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
+    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
+    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
+                             int r_src2) = 0;
+    virtual LIR* OpTestSuspend(LIR* target) = 0;
+    virtual LIR* OpThreadMem(OpKind op, int thread_offset) = 0;
+    virtual LIR* OpVldm(int rBase, int count) = 0;
+    virtual LIR* OpVstm(int rBase, int count) = 0;
+    virtual void OpLea(int rBase, int reg1, int reg2, int scale,
+                       int offset) = 0;
+    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
+                               int src_hi) = 0;
+    virtual void OpTlsCmp(int offset, int val) = 0;
+    virtual bool InexpensiveConstantInt(int32_t value) = 0;
+    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
+    virtual bool InexpensiveConstantLong(int64_t value) = 0;
+    virtual bool InexpensiveConstantDouble(int64_t value) = 0;
+
+    // Temp workaround
+    void Workaround7250540(RegLocation rl_dest, int value);
+
+  protected:
+    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+    CompilationUnit* GetCompilationUnit() {
+      return cu_;
+    }
+
+  private:
+    void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
+                            RegLocation rl_src);
+    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+                                    bool type_known_abstract, bool use_declaring_class,
+                                    bool can_assume_type_is_in_dex_cache,
+                                    uint32_t type_idx, RegLocation rl_dest,
+                                    RegLocation rl_src);
+
+    void ClobberBody(RegisterInfo* p);
+    void ResetDefBody(RegisterInfo* p) {
+      p->def_start = NULL;
+      p->def_end = NULL;
+    }
+
+  public:
+    // TODO: add accessors for these.
+    LIR* literal_list_;                        // Constants.
+    LIR* method_literal_list_;                 // Method literals requiring patching.
+    LIR* code_literal_list_;                   // Code literals requiring patching.
+
+  protected:
+    CompilationUnit* const cu_;
+    MIRGraph* const mir_graph_;
+    GrowableArray<SwitchTable*> switch_tables_;
+    GrowableArray<FillArrayData*> fill_array_data_;
+    GrowableArray<LIR*> throw_launchpads_;
+    GrowableArray<LIR*> suspend_launchpads_;
+    GrowableArray<LIR*> intrinsic_launchpads_;
+    SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
+    /*
+     * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
+     * Native PC is the return address of the safepointed operation.  Dex PC is for
+     * the instruction being executed at the safepoint.
+     */
+    std::vector<uint32_t> pc2dex_mapping_table_;
+    /*
+     * Holds mapping from Dex PC to native PC for catch entry points.  Native PC and Dex PC
+     * immediately precede the instruction.
+     */
+    std::vector<uint32_t> dex2pc_mapping_table_;
+    int data_offset_;                     // starting offset of literal pool.
+    int total_size_;                      // header + code size.
+    LIR* block_label_list_;
+    PromotionMap* promotion_map_;
+    /*
+     * TODO: The code generation utilities don't have a built-in
+     * mechanism to propagate the original Dalvik opcode address to the
+     * associated generated instructions.  For the trace compiler, this wasn't
+     * necessary because the interpreter handled all throws and debugging
+     * requests.  For now we'll handle this by placing the Dalvik offset
+     * in the CompilationUnit struct before codegen for each instruction.
+     * The low-level LIR creation utilities will pull it from here.  Rework this.
+     */
+    int current_dalvik_offset_;
+    RegisterPool* reg_pool_;
+    /*
+     * Sanity checking for the register temp tracking.  The same ssa
+     * name should never be associated with more than one temp register
+     * during the compilation of a single instruction.
+     */
+    int live_sreg_;
+    CodeBuffer code_buffer_;
+    std::vector<uint32_t> combined_mapping_table_;
+    std::vector<uint32_t> core_vmap_table_;
+    std::vector<uint32_t> fp_vmap_table_;
+    std::vector<uint8_t> native_gc_map_;
+    int num_core_spills_;
+    int num_fp_spills_;
+    int frame_size_;
+    unsigned int core_spill_mask_;
+    unsigned int fp_spill_mask_;
+    LIR* first_lir_insn_;
+    LIR* last_lir_insn_;
+};  // Class Mir2Lir
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
new file mode 100644
index 0000000..8f43542
--- /dev/null
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -0,0 +1,1237 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains register allocation support. */
+
+#include "dex/compiler_ir.h"
+#include "dex/compiler_internals.h"
+#include "mir_to_lir-inl.h"
+
+namespace art {
+
+/*
+ * Free all allocated temps in the temp pools.  Note that this does
+ * not affect the "liveness" of a temp register, which will stay
+ * live until it is either explicitly killed or reallocated.
+ */
+void Mir2Lir::ResetRegPool()
+{
+  int i;
+  for (i=0; i < reg_pool_->num_core_regs; i++) {
+    if (reg_pool_->core_regs[i].is_temp)
+      reg_pool_->core_regs[i].in_use = false;
+  }
+  for (i=0; i < reg_pool_->num_fp_regs; i++) {
+    if (reg_pool_->FPRegs[i].is_temp)
+      reg_pool_->FPRegs[i].in_use = false;
+  }
+  // Reset temp tracking sanity check.
+  if (kIsDebugBuild) {
+    live_sreg_ = INVALID_SREG;
+  }
+}
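+
+/*
+ * Illustrative usage (a sketch, not a requirement of this interface): the
+ * driver typically resets the pool at the start of each Dalvik instruction,
+ * e.g.
+ *
+ *   ResetRegPool();        // temps from the previous instruction become reusable
+ *   ... generate code for the current MIR ...
+ *
+ * Values left "live" in temps survive the reset and may still be picked up
+ * by AllocLive() while compiling the next instruction.
+ */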
+
+ /*
+  * Set up temp & preserved register pools specialized by target.
+  * Note: num_regs may be zero.
+  */
+void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num)
+{
+  int i;
+  for (i=0; i < num; i++) {
+    regs[i].reg = reg_nums[i];
+    regs[i].in_use = false;
+    regs[i].is_temp = false;
+    regs[i].pair = false;
+    regs[i].live = false;
+    regs[i].dirty = false;
+    regs[i].s_reg = INVALID_SREG;
+  }
+}
+
+void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs)
+{
+  LOG(INFO) << "================================================";
+  for (int i = 0; i < num_regs; i++) {
+    LOG(INFO) << StringPrintf(
+        "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+        p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
+        p[i].live, p[i].dirty, p[i].s_reg, reinterpret_cast<uintptr_t>(p[i].def_start),
+        reinterpret_cast<uintptr_t>(p[i].def_end));
+  }
+  LOG(INFO) << "================================================";
+}
+
+void Mir2Lir::DumpCoreRegPool()
+{
+  DumpRegPool(reg_pool_->core_regs, reg_pool_->num_core_regs);
+}
+
+void Mir2Lir::DumpFpRegPool()
+{
+  DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs);
+}
+
+void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
+{
+  int i;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].s_reg == s_reg) {
+      if (p[i].is_temp) {
+        p[i].live = false;
+      }
+      p[i].def_start = NULL;
+      p[i].def_end = NULL;
+    }
+  }
+}
+
+/*
+ * Break the association between a Dalvik vreg and a physical temp register of either register
+ * class.
+ * TODO: Ideally, the public version of this code should not exist.  Besides its local usage
+ * in the register utilities, it is also used by code gen routines to work around a deficiency in
+ * local register allocation, which fails to distinguish between the "in" and "out" identities
+ * of Dalvik vregs.  This can result in useless register copies when the same Dalvik vreg
+ * is used both as the source and destination register of an operation in which the type
+ * changes (for example: INT_TO_FLOAT v1, v1).  Revisit when improved register allocation is
+ * addressed.
+ */
+void Mir2Lir::ClobberSReg(int s_reg)
+{
+  /* Reset live temp tracking sanity checker */
+  if (kIsDebugBuild) {
+    if (s_reg == live_sreg_) {
+      live_sreg_ = INVALID_SREG;
+    }
+  }
+  ClobberSRegBody(reg_pool_->core_regs, reg_pool_->num_core_regs, s_reg);
+  ClobberSRegBody(reg_pool_->FPRegs, reg_pool_->num_fp_regs, s_reg);
+}
+
+/*
+ * SSA names associated with the initial definitions of Dalvik
+ * registers are the same as the Dalvik register number (and
+ * thus take the same position in the promotion_map).  However,
+ * the special Method* and compiler temp registers use negative
+ * v_reg numbers to distinguish them and can have an arbitrary
+ * ssa name (above the last original Dalvik register).  This function
+ * maps SSA names to positions in the promotion_map array.
+ */
+int Mir2Lir::SRegToPMap(int s_reg)
+{
+  DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs());
+  DCHECK_GE(s_reg, 0);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  if (v_reg >= 0) {
+    DCHECK_LT(v_reg, cu_->num_dalvik_registers);
+    return v_reg;
+  } else {
+    int pos = std::abs(v_reg) - std::abs(SSA_METHOD_BASEREG);
+    DCHECK_LE(pos, cu_->num_compiler_temps);
+    return cu_->num_dalvik_registers + pos;
+  }
+}
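+
+/*
+ * Illustrative example (hypothetical values, assuming compiler temps are
+ * numbered just below the Method* pseudo-register): with 10 Dalvik registers
+ * and one compiler temp, the SSA name whose v_reg is 3 maps to promotion_map
+ * slot 3, the Method* pseudo-register (v_reg == SSA_METHOD_BASEREG) maps to
+ * slot 10, and the first compiler temp maps to slot 11.
+ */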
+
+void Mir2Lir::RecordCorePromotion(int reg, int s_reg)
+{
+  int p_map_idx = SRegToPMap(s_reg);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  GetRegInfo(reg)->in_use = true;
+  core_spill_mask_ |= (1 << reg);
+  // Include reg for later sort
+  core_vmap_table_.push_back(reg << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
+  num_core_spills_++;
+  promotion_map_[p_map_idx].core_location = kLocPhysReg;
+  promotion_map_[p_map_idx].core_reg = reg;
+}
+
+/* Reserve a callee-save register.  Return -1 if none available */
+int Mir2Lir::AllocPreservedCoreReg(int s_reg)
+{
+  int res = -1;
+  RegisterInfo* core_regs = reg_pool_->core_regs;
+  for (int i = 0; i < reg_pool_->num_core_regs; i++) {
+    if (!core_regs[i].is_temp && !core_regs[i].in_use) {
+      res = core_regs[i].reg;
+      RecordCorePromotion(res, s_reg);
+      break;
+    }
+  }
+  return res;
+}
+
+void Mir2Lir::RecordFpPromotion(int reg, int s_reg)
+{
+  int p_map_idx = SRegToPMap(s_reg);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  GetRegInfo(reg)->in_use = true;
+  MarkPreservedSingle(v_reg, reg);
+  promotion_map_[p_map_idx].fp_location = kLocPhysReg;
+  promotion_map_[p_map_idx].FpReg = reg;
+}
+
+/*
+ * Reserve a callee-save fp single register.  Try to fulfill the request for
+ * even/odd allocation, but go ahead and allocate anything if not
+ * available.  If nothing's available, return -1.
+ */
+int Mir2Lir::AllocPreservedSingle(int s_reg, bool even)
+{
+  int res = -1;
+  RegisterInfo* FPRegs = reg_pool_->FPRegs;
+  for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
+    if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
+      ((FPRegs[i].reg & 0x1) == 0) == even) {
+      res = FPRegs[i].reg;
+      RecordFpPromotion(res, s_reg);
+      break;
+    }
+  }
+  return res;
+}
+
+/*
+ * Somewhat messy code here.  We want to allocate a pair of contiguous
+ * physical single-precision floating point registers starting with
+ * an even numbered reg.  It is possible that the paired s_reg (s_reg+1)
+ * has already been allocated - try to fit if possible.  Fail to
+ * allocate if we can't meet the requirement that s_reg map to an
+ * even-numbered sX and (s_reg+1) map to sX+1.
+ */
+int Mir2Lir::AllocPreservedDouble(int s_reg)
+{
+  int res = -1; // Assume failure
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  int p_map_idx = SRegToPMap(s_reg);
+  if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
+    // Upper reg is already allocated.  Can we fit?
+    int high_reg = promotion_map_[p_map_idx+1].FpReg;
+    if ((high_reg & 1) == 0) {
+      // High reg is even - fail.
+      return res;
+    }
+    // Is the low reg of the pair free?
+    RegisterInfo* p = GetRegInfo(high_reg-1);
+    if (p->in_use || p->is_temp) {
+      // Already allocated or not preserved - fail.
+      return res;
+    }
+    // OK - good to go.
+    res = p->reg;
+    p->in_use = true;
+    DCHECK_EQ((res & 1), 0);
+    MarkPreservedSingle(v_reg, res);
+  } else {
+    RegisterInfo* FPRegs = reg_pool_->FPRegs;
+    for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
+      if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
+        ((FPRegs[i].reg & 0x1) == 0x0) &&
+        !FPRegs[i+1].is_temp && !FPRegs[i+1].in_use &&
+        ((FPRegs[i+1].reg & 0x1) == 0x1) &&
+        (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
+        res = FPRegs[i].reg;
+        FPRegs[i].in_use = true;
+        MarkPreservedSingle(v_reg, res);
+        FPRegs[i+1].in_use = true;
+        DCHECK_EQ(res + 1, FPRegs[i+1].reg);
+        MarkPreservedSingle(v_reg+1, res+1);
+        break;
+      }
+    }
+  }
+  if (res != -1) {
+    promotion_map_[p_map_idx].fp_location = kLocPhysReg;
+    promotion_map_[p_map_idx].FpReg = res;
+    promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
+    promotion_map_[p_map_idx+1].FpReg = res + 1;
+  }
+  return res;
+}
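+
+/*
+ * Illustrative example (register names are hypothetical): if the high half of
+ * the pair was already promoted to single-precision reg s7, the low half can
+ * only go in s6, and we succeed only if s6 is a free, preserved (non-temp)
+ * register.  Otherwise we scan for an aligned, free, preserved pair such as
+ * (s16, s17).
+ */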
+
+
+/*
+ * Reserve a callee-save fp register.  If this register can be used
+ * as the first of a double, attempt to allocate an even pair of fp
+ * single regs (but if we can't, still attempt to allocate a single,
+ * preferring an odd register first).
+ */
+int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start)
+{
+  int res = -1;
+  if (double_start) {
+    res = AllocPreservedDouble(s_reg);
+  }
+  if (res == -1) {
+    res = AllocPreservedSingle(s_reg, false /* try odd # */);
+  }
+  if (res == -1)
+    res = AllocPreservedSingle(s_reg, true /* try even # */);
+  return res;
+}
+
+int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
+                           bool required)
+{
+  int i;
+  int next = *next_temp;
+  for (i=0; i< num_regs; i++) {
+    if (next >= num_regs)
+      next = 0;
+    if (p[next].is_temp && !p[next].in_use && !p[next].live) {
+      Clobber(p[next].reg);
+      p[next].in_use = true;
+      p[next].pair = false;
+      *next_temp = next + 1;
+      return p[next].reg;
+    }
+    next++;
+  }
+  next = *next_temp;
+  for (i=0; i< num_regs; i++) {
+    if (next >= num_regs)
+      next = 0;
+    if (p[next].is_temp && !p[next].in_use) {
+      Clobber(p[next].reg);
+      p[next].in_use = true;
+      p[next].pair = false;
+      *next_temp = next + 1;
+      return p[next].reg;
+    }
+    next++;
+  }
+  if (required) {
+    CodegenDump();
+    DumpRegPool(reg_pool_->core_regs,
+          reg_pool_->num_core_regs);
+    LOG(FATAL) << "No free temp registers";
+  }
+  return -1;  // No register available
+}
+
+// REDO: too many assumptions.
+int Mir2Lir::AllocTempDouble()
+{
+  RegisterInfo* p = reg_pool_->FPRegs;
+  int num_regs = reg_pool_->num_fp_regs;
+  /* Start looking at an even reg */
+  int next = reg_pool_->next_fp_reg & ~0x1;
+
+  // First try to avoid allocating live registers
+  for (int i=0; i < num_regs; i+=2) {
+    if (next >= num_regs)
+      next = 0;
+    if ((p[next].is_temp && !p[next].in_use && !p[next].live) &&
+      (p[next+1].is_temp && !p[next+1].in_use && !p[next+1].live)) {
+      Clobber(p[next].reg);
+      Clobber(p[next+1].reg);
+      p[next].in_use = true;
+      p[next+1].in_use = true;
+      DCHECK_EQ((p[next].reg+1), p[next+1].reg);
+      DCHECK_EQ((p[next].reg & 0x1), 0);
+      reg_pool_->next_fp_reg = next + 2;
+      if (reg_pool_->next_fp_reg >= num_regs) {
+        reg_pool_->next_fp_reg = 0;
+      }
+      return p[next].reg;
+    }
+    next += 2;
+  }
+  next = reg_pool_->next_fp_reg & ~0x1;
+
+  // No choice - find a pair and kill it.
+  for (int i=0; i < num_regs; i+=2) {
+    if (next >= num_regs)
+      next = 0;
+    if (p[next].is_temp && !p[next].in_use && p[next+1].is_temp &&
+      !p[next+1].in_use) {
+      Clobber(p[next].reg);
+      Clobber(p[next+1].reg);
+      p[next].in_use = true;
+      p[next+1].in_use = true;
+      DCHECK_EQ((p[next].reg+1), p[next+1].reg);
+      DCHECK_EQ((p[next].reg & 0x1), 0);
+      reg_pool_->next_fp_reg = next + 2;
+      if (reg_pool_->next_fp_reg >= num_regs) {
+        reg_pool_->next_fp_reg = 0;
+      }
+      return p[next].reg;
+    }
+    next += 2;
+  }
+  LOG(FATAL) << "No free temp registers (pair)";
+  return -1;
+}
+
+/* Return a temp if one is available, -1 otherwise */
+int Mir2Lir::AllocFreeTemp()
+{
+  return AllocTempBody(reg_pool_->core_regs,
+             reg_pool_->num_core_regs,
+             &reg_pool_->next_core_reg, false);
+}
+
+int Mir2Lir::AllocTemp()
+{
+  return AllocTempBody(reg_pool_->core_regs,
+             reg_pool_->num_core_regs,
+             &reg_pool_->next_core_reg, true);
+}
+
+int Mir2Lir::AllocTempFloat()
+{
+  return AllocTempBody(reg_pool_->FPRegs,
+             reg_pool_->num_fp_regs,
+             &reg_pool_->next_fp_reg, true);
+}
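+
+/*
+ * Typical allocate/use/free pattern (an illustrative sketch):
+ *
+ *   int r_tmp = AllocTemp();
+ *   LoadConstant(r_tmp, 0);
+ *   ... use r_tmp ...
+ *   FreeTemp(r_tmp);
+ *
+ * FreeTemp() only releases the allocation (in_use); any value left in the
+ * register stays live until it is clobbered.
+ */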
+
+Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg)
+{
+  int i;
+  if (s_reg == -1)
+    return NULL;
+  for (i=0; i < num_regs; i++) {
+    if (p[i].live && (p[i].s_reg == s_reg)) {
+      if (p[i].is_temp)
+        p[i].in_use = true;
+      return &p[i];
+    }
+  }
+  return NULL;
+}
+
+Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class)
+{
+  RegisterInfo* res = NULL;
+  switch (reg_class) {
+    case kAnyReg:
+      res = AllocLiveBody(reg_pool_->FPRegs,
+                reg_pool_->num_fp_regs, s_reg);
+      if (res)
+        break;
+      /* Intentional fallthrough */
+    case kCoreReg:
+      res = AllocLiveBody(reg_pool_->core_regs,
+                reg_pool_->num_core_regs, s_reg);
+      break;
+    case kFPReg:
+      res = AllocLiveBody(reg_pool_->FPRegs,
+                reg_pool_->num_fp_regs, s_reg);
+      break;
+    default:
+      LOG(FATAL) << "Invalid register type";
+  }
+  return res;
+}
+
+void Mir2Lir::FreeTemp(int reg)
+{
+  RegisterInfo* p = reg_pool_->core_regs;
+  int num_regs = reg_pool_->num_core_regs;
+  int i;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].reg == reg) {
+      if (p[i].is_temp) {
+        p[i].in_use = false;
+      }
+      p[i].pair = false;
+      return;
+    }
+  }
+  p = reg_pool_->FPRegs;
+  num_regs = reg_pool_->num_fp_regs;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].reg == reg) {
+      if (p[i].is_temp) {
+        p[i].in_use = false;
+      }
+      p[i].pair = false;
+      return;
+    }
+  }
+  LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
+}
+
+Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg)
+{
+  RegisterInfo* p = reg_pool_->core_regs;
+  int num_regs = reg_pool_->num_core_regs;
+  int i;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].reg == reg) {
+      return p[i].live ? &p[i] : NULL;
+    }
+  }
+  p = reg_pool_->FPRegs;
+  num_regs = reg_pool_->num_fp_regs;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].reg == reg) {
+      return p[i].live ? &p[i] : NULL;
+    }
+  }
+  return NULL;
+}
+
+Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg)
+{
+  RegisterInfo* p = GetRegInfo(reg);
+  return (p->is_temp) ? p : NULL;
+}
+
+Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg)
+{
+  RegisterInfo* p = GetRegInfo(reg);
+  return (p->is_temp) ? NULL : p;
+}
+
+bool Mir2Lir::IsDirty(int reg)
+{
+  RegisterInfo* p = GetRegInfo(reg);
+  return p->dirty;
+}
+
+/*
+ * Similar to AllocTemp(), but forces the allocation of a specific
+ * register.  No check is made to see if the register was previously
+ * allocated.  Use with caution.
+ */
+void Mir2Lir::LockTemp(int reg)
+{
+  RegisterInfo* p = reg_pool_->core_regs;
+  int num_regs = reg_pool_->num_core_regs;
+  int i;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].reg == reg) {
+      DCHECK(p[i].is_temp);
+      p[i].in_use = true;
+      p[i].live = false;
+      return;
+    }
+  }
+  p = reg_pool_->FPRegs;
+  num_regs = reg_pool_->num_fp_regs;
+  for (i=0; i< num_regs; i++) {
+    if (p[i].reg == reg) {
+      DCHECK(p[i].is_temp);
+      p[i].in_use = true;
+      p[i].live = false;
+      return;
+    }
+  }
+  LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
+}
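+
+/*
+ * Illustrative use (a sketch of typical target behavior): LockCallTemps() is
+ * usually implemented by calling LockTemp() on each ABI argument register,
+ * e.g. LockTemp(TargetReg(kArg0)), so those registers cannot be handed out
+ * while call arguments are being marshalled.
+ */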
+
+void Mir2Lir::ResetDef(int reg)
+{
+  ResetDefBody(GetRegInfo(reg));
+}
+
+void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2)
+{
+  if (start && finish) {
+    LIR *p;
+    DCHECK_EQ(s_reg1, s_reg2);
+    for (p = start; ;p = p->next) {
+      NopLIR(p);
+      if (p == finish)
+        break;
+    }
+  }
+}
+
+/*
+ * Mark the beginning and end LIR of a def sequence.  Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish)
+{
+  DCHECK(!rl.wide);
+  DCHECK(start && start->next);
+  DCHECK(finish);
+  RegisterInfo* p = GetRegInfo(rl.low_reg);
+  p->def_start = start->next;
+  p->def_end = finish;
+}
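+
+/*
+ * Usage sketch (illustrative): callers snapshot the last LIR before emitting
+ * the defining sequence, e.g.
+ *
+ *   LIR* def_start = last_lir_insn_;
+ *   ... emit instructions that define rl_dest.low_reg ...
+ *   MarkDef(rl_dest, def_start, last_lir_insn_);
+ *
+ * so ResetDefLoc() can later nullify the whole sequence if the def proves
+ * redundant.
+ */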
+
+/*
+ * Mark the beginning and end LIR of a def sequence.  Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish)
+{
+  DCHECK(rl.wide);
+  DCHECK(start && start->next);
+  DCHECK(finish);
+  RegisterInfo* p = GetRegInfo(rl.low_reg);
+  ResetDef(rl.high_reg);  // Only track low of pair
+  p->def_start = start->next;
+  p->def_end = finish;
+}
+
+RegLocation Mir2Lir::WideToNarrow(RegLocation rl)
+{
+  DCHECK(rl.wide);
+  if (rl.location == kLocPhysReg) {
+    RegisterInfo* info_lo = GetRegInfo(rl.low_reg);
+    RegisterInfo* info_hi = GetRegInfo(rl.high_reg);
+    if (info_lo->is_temp) {
+      info_lo->pair = false;
+      info_lo->def_start = NULL;
+      info_lo->def_end = NULL;
+    }
+    if (info_hi->is_temp) {
+      info_hi->pair = false;
+      info_hi->def_start = NULL;
+      info_hi->def_end = NULL;
+    }
+  }
+  rl.wide = false;
+  return rl;
+}
+
+void Mir2Lir::ResetDefLoc(RegLocation rl)
+{
+  DCHECK(!rl.wide);
+  RegisterInfo* p = IsTemp(rl.low_reg);
+  if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
+    DCHECK(!p->pair);
+    NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
+  }
+  ResetDef(rl.low_reg);
+}
+
+void Mir2Lir::ResetDefLocWide(RegLocation rl)
+{
+  DCHECK(rl.wide);
+  RegisterInfo* p_low = IsTemp(rl.low_reg);
+  RegisterInfo* p_high = IsTemp(rl.high_reg);
+  if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
+    DCHECK(p_low->pair);
+    NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
+  }
+  if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
+    DCHECK(p_high->pair);
+  }
+  ResetDef(rl.low_reg);
+  ResetDef(rl.high_reg);
+}
+
+void Mir2Lir::ResetDefTracking()
+{
+  int i;
+  for (i=0; i< reg_pool_->num_core_regs; i++) {
+    ResetDefBody(&reg_pool_->core_regs[i]);
+  }
+  for (i=0; i< reg_pool_->num_fp_regs; i++) {
+    ResetDefBody(&reg_pool_->FPRegs[i]);
+  }
+}
+
+void Mir2Lir::ClobberAllRegs()
+{
+  int i;
+  for (i=0; i< reg_pool_->num_core_regs; i++) {
+    ClobberBody(&reg_pool_->core_regs[i]);
+  }
+  for (i=0; i< reg_pool_->num_fp_regs; i++) {
+    ClobberBody(&reg_pool_->FPRegs[i]);
+  }
+}
+
+// Make sure nothing is live and dirty
+void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs)
+{
+  int i;
+  for (i=0; i < num_regs; i++) {
+    if (info[i].live && info[i].dirty) {
+      if (info[i].pair) {
+        FlushRegWide(info[i].reg, info[i].partner);
+      } else {
+        FlushReg(info[i].reg);
+      }
+    }
+  }
+}
+
+void Mir2Lir::FlushAllRegs()
+{
+  FlushAllRegsBody(reg_pool_->core_regs,
+           reg_pool_->num_core_regs);
+  FlushAllRegsBody(reg_pool_->FPRegs,
+           reg_pool_->num_fp_regs);
+  ClobberAllRegs();
+}
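+
+/*
+ * Note: FlushAllRegs() is typically invoked before constructs that may
+ * transfer control out of the compiled code (e.g. runtime helper calls), so
+ * that every dirty Dalvik value is written back to its home frame location
+ * first.
+ */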
+
+
+// TUNING: rewrite all of this reg stuff.  Probably use an attribute table.
+bool Mir2Lir::RegClassMatches(int reg_class, int reg)
+{
+  if (reg_class == kAnyReg) {
+    return true;
+  } else if (reg_class == kCoreReg) {
+    return !IsFpReg(reg);
+  } else {
+    return IsFpReg(reg);
+  }
+}
+
+void Mir2Lir::MarkLive(int reg, int s_reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) {
+    return;  /* already live */
+  } else if (s_reg != INVALID_SREG) {
+    ClobberSReg(s_reg);
+    if (info->is_temp) {
+      info->live = true;
+    }
+  } else {
+    /* Can't be live if no associated s_reg */
+    DCHECK(info->is_temp);
+    info->live = false;
+  }
+  info->s_reg = s_reg;
+}
+
+void Mir2Lir::MarkTemp(int reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  info->is_temp = true;
+}
+
+void Mir2Lir::UnmarkTemp(int reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  info->is_temp = false;
+}
+
+void Mir2Lir::MarkPair(int low_reg, int high_reg)
+{
+  RegisterInfo* info_lo = GetRegInfo(low_reg);
+  RegisterInfo* info_hi = GetRegInfo(high_reg);
+  info_lo->pair = info_hi->pair = true;
+  info_lo->partner = high_reg;
+  info_hi->partner = low_reg;
+}
+
+void Mir2Lir::MarkClean(RegLocation loc)
+{
+  RegisterInfo* info = GetRegInfo(loc.low_reg);
+  info->dirty = false;
+  if (loc.wide) {
+    info = GetRegInfo(loc.high_reg);
+    info->dirty = false;
+  }
+}
+
+void Mir2Lir::MarkDirty(RegLocation loc)
+{
+  if (loc.home) {
+    // If already home, can't be dirty
+    return;
+  }
+  RegisterInfo* info = GetRegInfo(loc.low_reg);
+  info->dirty = true;
+  if (loc.wide) {
+    info = GetRegInfo(loc.high_reg);
+    info->dirty = true;
+  }
+}
+
+void Mir2Lir::MarkInUse(int reg)
+{
+    RegisterInfo* info = GetRegInfo(reg);
+    info->in_use = true;
+}
+
+void Mir2Lir::CopyRegInfo(int new_reg, int old_reg)
+{
+  RegisterInfo* new_info = GetRegInfo(new_reg);
+  RegisterInfo* old_info = GetRegInfo(old_reg);
+  // Target temp status must not change
+  bool is_temp = new_info->is_temp;
+  *new_info = *old_info;
+  // Restore target's temp status
+  new_info->is_temp = is_temp;
+  new_info->reg = new_reg;
+}
+
+bool Mir2Lir::CheckCorePoolSanity()
+{
+   for (int i = 0; i < reg_pool_->num_core_regs; i++) {
+     if (reg_pool_->core_regs[i].pair) {
+       int my_reg = reg_pool_->core_regs[i].reg;
+       int my_sreg = reg_pool_->core_regs[i].s_reg;
+       int partner_reg = reg_pool_->core_regs[i].partner;
+       RegisterInfo* partner = GetRegInfo(partner_reg);
+       DCHECK(partner != NULL);
+       DCHECK(partner->pair);
+       DCHECK_EQ(my_reg, partner->partner);
+       int partner_sreg = partner->s_reg;
+       if (my_sreg == INVALID_SREG) {
+         DCHECK_EQ(partner_sreg, INVALID_SREG);
+       } else {
+         int diff = my_sreg - partner_sreg;
+         DCHECK((diff == -1) || (diff == 1));
+       }
+     }
+     if (!reg_pool_->core_regs[i].live) {
+       DCHECK(reg_pool_->core_regs[i].def_start == NULL);
+       DCHECK(reg_pool_->core_regs[i].def_end == NULL);
+     }
+   }
+   return true;
+}
+
+/*
+ * Return an updated location record with current in-register status.
+ * If the value lives in live temps, reflect that fact.  No code
+ * is generated.  If the live value is part of an older pair,
+ * clobber both low and high.
+ * TUNING: clobbering both is a bit heavy-handed, but the alternative
+ * is a bit complex when dealing with FP regs.  Examine code to see
+ * if it's worthwhile trying to be more clever here.
+ */
+
+RegLocation Mir2Lir::UpdateLoc(RegLocation loc)
+{
+  DCHECK(!loc.wide);
+  DCHECK(CheckCorePoolSanity());
+  if (loc.location != kLocPhysReg) {
+    DCHECK((loc.location == kLocDalvikFrame) ||
+         (loc.location == kLocCompilerTemp));
+    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
+    if (info_lo) {
+      if (info_lo->pair) {
+        Clobber(info_lo->reg);
+        Clobber(info_lo->partner);
+        FreeTemp(info_lo->reg);
+      } else {
+        loc.low_reg = info_lo->reg;
+        loc.location = kLocPhysReg;
+      }
+    }
+  }
+
+  return loc;
+}
+
+/* See comments for UpdateLoc. */
+RegLocation Mir2Lir::UpdateLocWide(RegLocation loc)
+{
+  DCHECK(loc.wide);
+  DCHECK(CheckCorePoolSanity());
+  if (loc.location != kLocPhysReg) {
+    DCHECK((loc.location == kLocDalvikFrame) ||
+         (loc.location == kLocCompilerTemp));
+    // Are the dalvik regs already live in physical registers?
+    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
+    RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);
+    bool match = true;
+    match = match && (info_lo != NULL);
+    match = match && (info_hi != NULL);
+    // Are they both core or both FP?
+    match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
+    // If a pair of floating point singles, are they properly aligned?
+    if (match && IsFpReg(info_lo->reg)) {
+      match &= ((info_lo->reg & 0x1) == 0);
+      match &= ((info_hi->reg - info_lo->reg) == 1);
+    }
+    // If previously used as a pair, it is the same pair?
+    if (match && (info_lo->pair || info_hi->pair)) {
+      match = (info_lo->pair == info_hi->pair);
+      match &= ((info_lo->reg == info_hi->partner) &&
+            (info_hi->reg == info_lo->partner));
+    }
+    if (match) {
+      // Can reuse - update the register usage info
+      loc.low_reg = info_lo->reg;
+      loc.high_reg = info_hi->reg;
+      loc.location = kLocPhysReg;
+      MarkPair(loc.low_reg, loc.high_reg);
+      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      return loc;
+    }
+    // Can't easily reuse - clobber and free any overlaps
+    if (info_lo) {
+      Clobber(info_lo->reg);
+      FreeTemp(info_lo->reg);
+      if (info_lo->pair)
+        Clobber(info_lo->partner);
+    }
+    if (info_hi) {
+      Clobber(info_hi->reg);
+      FreeTemp(info_hi->reg);
+      if (info_hi->pair)
+        Clobber(info_hi->partner);
+    }
+  }
+  return loc;
+}
+
+
+/* For use in cases where we don't know (or care about) the width. */
+RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc)
+{
+  if (loc.wide)
+    return UpdateLocWide(loc);
+  else
+    return UpdateLoc(loc);
+}
+
+RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update)
+{
+  DCHECK(loc.wide);
+  int new_regs;
+  int low_reg;
+  int high_reg;
+
+  loc = UpdateLocWide(loc);
+
+  /* If already in registers, we can assume proper form.  Right reg class? */
+  if (loc.location == kLocPhysReg) {
+    DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
+    DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+    if (!RegClassMatches(reg_class, loc.low_reg)) {
+      /* Wrong register class.  Reallocate and copy */
+      new_regs = AllocTypedTempPair(loc.fp, reg_class);
+      low_reg = new_regs & 0xff;
+      high_reg = (new_regs >> 8) & 0xff;
+      OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
+      CopyRegInfo(low_reg, loc.low_reg);
+      CopyRegInfo(high_reg, loc.high_reg);
+      Clobber(loc.low_reg);
+      Clobber(loc.high_reg);
+      loc.low_reg = low_reg;
+      loc.high_reg = high_reg;
+      MarkPair(loc.low_reg, loc.high_reg);
+      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+    }
+    return loc;
+  }
+
+  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
+  DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
+
+  new_regs = AllocTypedTempPair(loc.fp, reg_class);
+  loc.low_reg = new_regs & 0xff;
+  loc.high_reg = (new_regs >> 8) & 0xff;
+
+  MarkPair(loc.low_reg, loc.high_reg);
+  if (update) {
+    loc.location = kLocPhysReg;
+    MarkLive(loc.low_reg, loc.s_reg_low);
+    MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
+  }
+  DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+  return loc;
+}
+
+RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update)
+{
+  int new_reg;
+
+  if (loc.wide)
+    return EvalLocWide(loc, reg_class, update);
+
+  loc = UpdateLoc(loc);
+
+  if (loc.location == kLocPhysReg) {
+    if (!RegClassMatches(reg_class, loc.low_reg)) {
+      /* Wrong register class.  Realloc, copy and transfer ownership */
+      new_reg = AllocTypedTemp(loc.fp, reg_class);
+      OpRegCopy(new_reg, loc.low_reg);
+      CopyRegInfo(new_reg, loc.low_reg);
+      Clobber(loc.low_reg);
+      loc.low_reg = new_reg;
+    }
+    return loc;
+  }
+
+  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
+
+  new_reg = AllocTypedTemp(loc.fp, reg_class);
+  loc.low_reg = new_reg;
+
+  if (update) {
+    loc.location = kLocPhysReg;
+    MarkLive(loc.low_reg, loc.s_reg_low);
+  }
+  return loc;
+}
+
+/* Use SSA names to count references of base Dalvik v_regs. */
+void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts) {
+  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
+    RegLocation loc = mir_graph_->reg_location_[i];
+    RefCounts* counts = loc.fp ? fp_counts : core_counts;
+    int p_map_idx = SRegToPMap(loc.s_reg_low);
+    // Don't count easily regenerated immediates.
+    if (loc.fp || !IsInexpensiveConstant(loc)) {
+      counts[p_map_idx].count += mir_graph_->GetUseCount(i);
+    }
+    if (loc.wide && loc.fp && !loc.high_word) {
+      counts[p_map_idx].double_start = true;
+    }
+  }
+}
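+
+/*
+ * Illustrative example (hypothetical counts): if v2 has three SSA names used
+ * 1, 2 and 4 times, counts[2].count becomes 7.  Non-fp values that are cheap
+ * to rematerialize as constants are skipped entirely.
+ */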
+
+/* qsort callback function, sort descending */
+static int SortCounts(const void *val1, const void *val2)
+{
+  const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
+  const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
+  return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1);
+}
+
+void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg)
+{
+  LOG(INFO) << msg;
+  for (int i = 0; i < size; i++) {
+    LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
+  }
+}
+
+/*
+ * Note: some portions of this code required even if the kPromoteRegs
+ * optimization is disabled.
+ */
+void Mir2Lir::DoPromotion()
+{
+  int reg_bias = cu_->num_compiler_temps + 1;
+  int dalvik_regs = cu_->num_dalvik_registers;
+  int num_regs = dalvik_regs + reg_bias;
+  const int promotion_threshold = 1;
+
+  // Allow target code to add any special registers
+  AdjustSpillMask();
+
+  /*
+   * Simple register promotion. Just do a static count of the uses
+   * of Dalvik registers.  Note that we examine the SSA names, but
+   * count based on original Dalvik register name.  Count refs
+   * separately based on type in order to give allocation
+   * preference to fp doubles - which must be allocated as a pair of
+   * sequential physical single fp registers starting with an
+   * even-numbered reg.
+   * TUNING: replace with linear scan once we have the ability
+   * to describe register live ranges for GC.
+   */
+  RefCounts *core_regs =
+      static_cast<RefCounts*>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
+                                             ArenaAllocator::kAllocRegAlloc));
+  RefCounts *FpRegs =
+      static_cast<RefCounts *>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
+                                              ArenaAllocator::kAllocRegAlloc));
+  // Set ssa names for original Dalvik registers
+  for (int i = 0; i < dalvik_regs; i++) {
+    core_regs[i].s_reg = FpRegs[i].s_reg = i;
+  }
+  // Set ssa name for Method*
+  core_regs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();
+  FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();  // For consistency
+  // Set ssa names for compiler_temps
+  for (int i = 1; i <= cu_->num_compiler_temps; i++) {
+    CompilerTemp* ct = mir_graph_->compiler_temps_.Get(i);
+    core_regs[dalvik_regs + i].s_reg = ct->s_reg;
+    FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
+  }
+
+  // Sum use counts of SSA regs by original Dalvik vreg.
+  CountRefs(core_regs, FpRegs);
+
+  /*
+   * Ideally, we'd allocate doubles starting with an even-numbered
+   * register.  Bias the counts to try to allocate any vreg that's
+   * used as the start of a pair first.
+   */
+  for (int i = 0; i < num_regs; i++) {
+    if (FpRegs[i].double_start) {
+      FpRegs[i].count *= 2;
+    }
+  }
+
+  // Sort the count arrays
+  qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
+  qsort(FpRegs, num_regs, sizeof(RefCounts), SortCounts);
+
+  if (cu_->verbose) {
+    DumpCounts(core_regs, num_regs, "Core regs after sort");
+    DumpCounts(FpRegs, num_regs, "Fp regs after sort");
+  }
+
+  if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
+    // Promote FpRegs
+    for (int i = 0; (i < num_regs) &&
+            (FpRegs[i].count >= promotion_threshold); i++) {
+      int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
+      if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
+        int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
+          FpRegs[i].double_start);
+        if (reg < 0) {
+          break;  // No more left
+        }
+      }
+    }
+
+    // Promote core regs
+    for (int i = 0; (i < num_regs) &&
+            (core_regs[i].count >= promotion_threshold); i++) {
+      int p_map_idx = SRegToPMap(core_regs[i].s_reg);
+      if (promotion_map_[p_map_idx].core_location !=
+          kLocPhysReg) {
+        int reg = AllocPreservedCoreReg(core_regs[i].s_reg);
+        if (reg < 0) {
+          break;  // No more left
+        }
+      }
+    }
+  }
+
+  // Now, update SSA names to new home locations
+  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
+    RegLocation *curr = &mir_graph_->reg_location_[i];
+    int p_map_idx = SRegToPMap(curr->s_reg_low);
+    if (!curr->wide) {
+      if (curr->fp) {
+        if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
+          curr->location = kLocPhysReg;
+          curr->low_reg = promotion_map_[p_map_idx].FpReg;
+          curr->home = true;
+        }
+      } else {
+        if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
+          curr->location = kLocPhysReg;
+          curr->low_reg = promotion_map_[p_map_idx].core_reg;
+          curr->home = true;
+        }
+      }
+      curr->high_reg = INVALID_REG;
+    } else {
+      if (curr->high_word) {
+        continue;
+      }
+      if (curr->fp) {
+        if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
+          (promotion_map_[p_map_idx+1].fp_location ==
+          kLocPhysReg)) {
+          int low_reg = promotion_map_[p_map_idx].FpReg;
+          int high_reg = promotion_map_[p_map_idx+1].FpReg;
+          // Doubles require pair of singles starting at even reg
+          if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
+            curr->location = kLocPhysReg;
+            curr->low_reg = low_reg;
+            curr->high_reg = high_reg;
+            curr->home = true;
+          }
+        }
+      } else {
+        if ((promotion_map_[p_map_idx].core_location == kLocPhysReg)
+           && (promotion_map_[p_map_idx+1].core_location ==
+           kLocPhysReg)) {
+          curr->location = kLocPhysReg;
+          curr->low_reg = promotion_map_[p_map_idx].core_reg;
+          curr->high_reg = promotion_map_[p_map_idx+1].core_reg;
+          curr->home = true;
+        }
+      }
+    }
+  }
+  if (cu_->verbose) {
+    DumpPromotionMap();
+  }
+}
+
+/* Returns sp-relative offset in bytes for a VReg */
+int Mir2Lir::VRegOffset(int v_reg)
+{
+  return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_,
+                                     fp_spill_mask_, frame_size_, v_reg);
+}
+
+/* Returns sp-relative offset in bytes for a SReg */
+int Mir2Lir::SRegOffset(int s_reg)
+{
+  return VRegOffset(mir_graph_->SRegToVReg(s_reg));
+}
+
+/* Mark register usage state and return long retloc */
+RegLocation Mir2Lir::GetReturnWide(bool is_double)
+{
+  RegLocation gpr_res = LocCReturnWide();
+  RegLocation fpr_res = LocCReturnDouble();
+  RegLocation res = is_double ? fpr_res : gpr_res;
+  Clobber(res.low_reg);
+  Clobber(res.high_reg);
+  LockTemp(res.low_reg);
+  LockTemp(res.high_reg);
+  MarkPair(res.low_reg, res.high_reg);
+  return res;
+}
+
+RegLocation Mir2Lir::GetReturn(bool is_float)
+{
+  RegLocation gpr_res = LocCReturn();
+  RegLocation fpr_res = LocCReturnFloat();
+  RegLocation res = is_float ? fpr_res : gpr_res;
+  Clobber(res.low_reg);
+  if (cu_->instruction_set == kMips) {
+    MarkInUse(res.low_reg);
+  } else {
+    LockTemp(res.low_reg);
+  }
+  return res;
+}
+
+void Mir2Lir::SimpleRegAlloc()
+{
+  DoPromotion();
+
+  if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) {
+    LOG(INFO) << "After Promotion";
+    mir_graph_->DumpRegLocTable(mir_graph_->reg_location_, mir_graph_->GetNumSSARegs());
+  }
+
+  /* Set the frame size */
+  frame_size_ = ComputeFrameSize();
+}
+
+/*
+ * Get the "real" sreg number associated with an s_reg slot.  In general,
+ * s_reg values passed through codegen are the SSA names created by
+ * dataflow analysis and refer to slot numbers in the mir_graph_->reg_location
+ * array.  However, renaming is accomplished by simply replacing RegLocation
+ * entries in the reglocation[] array.  Therefore, when location
+ * records for operands are first created, we need to ask the locRecord
+ * identified by the dataflow pass what its new name is.
+ */
+int Mir2Lir::GetSRegHi(int lowSreg) {
+  return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
+}
+
+bool Mir2Lir::oat_live_out(int s_reg) {
+  // For now, conservatively assume everything is live.
+  return true;
+}
+
+int Mir2Lir::oatSSASrc(MIR* mir, int num) {
+  DCHECK_GT(mir->ssa_rep->num_uses, num);
+  return mir->ssa_rep->uses[num];
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
new file mode 100644
index 0000000..4aeda41
--- /dev/null
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -0,0 +1,1388 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
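+// Note on the brace-initialized skeletons below: the fields are, in order,
+// prefix1, prefix2, opcode, extra_opcode1, extra_opcode2, modrm_opcode,
+// ax_opcode and immediate_bytes, matching how ComputeSize() and the Emit*
+// helpers later in this file consume them.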
+const X86EncodingMap X86Mir2Lir::EncodingMap[kX86Last] = {
+  { kX8632BitData, kData,    IS_UNARY_OP,            { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data",  "0x!0d" },
+  { kX86Bkpt,      kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" },
+  { kX86Nop,       kNop,     IS_UNARY_OP,            { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop",   "" },
+
+#define ENCODING_MAP(opname, mem_use, reg_def, uses_ccodes, \
+                     rm8_r8, rm32_r32, \
+                     r8_rm8, r32_rm32, \
+                     ax8_i8, ax32_i32, \
+                     rm8_i8, rm8_i8_modrm, \
+                     rm32_i32, rm32_i32_modrm, \
+                     rm32_i8, rm32_i8_modrm) \
+{ kX86 ## opname ## 8MR, kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0 }, #opname "8MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 8AR, kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0 }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0,            0,      0 }, #opname "8TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 8RR, kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 8RM, kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 8RA, kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 8RI, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1 }, #opname "8RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 8MI, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1 }, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1 }, #opname "8TI", "fs:[!0d],!1d" }, \
+  \
+{ kX86 ## opname ## 16MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_r32, 0, 0, 0,              0,        0 }, #opname "16MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 16AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_r32, 0, 0, 0,              0,        0 }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 16TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_r32, 0, 0, 0,              0,        0 }, #opname "16TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 16RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 16RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 16RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 16RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 16RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 2 }, #opname "16RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, 0,        2 }, #opname "16MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, 0,        2 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i32, 0, 0, rm32_i32_modrm, 0,        2 }, #opname "16TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 16RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16TI8", "fs:[!0d],!1d" }, \
+  \
+{ kX86 ## opname ## 32MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 32AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 32TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 32RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 32RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 32RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 32RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 32RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 32RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32TI8", "fs:[!0d],!1d" }
+
+ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x00 /* RegMem8/Reg8 */,     0x01 /* RegMem32/Reg32 */,
+  0x02 /* Reg8/RegMem8 */,     0x03 /* Reg32/RegMem32 */,
+  0x04 /* Rax8/imm8 opcode */, 0x05 /* Rax32/imm32 */,
+  0x80, 0x0 /* RegMem8/imm8 */,
+  0x81, 0x0 /* RegMem32/imm32 */, 0x83, 0x0 /* RegMem32/imm8 */),
+ENCODING_MAP(Or, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x08 /* RegMem8/Reg8 */,     0x09 /* RegMem32/Reg32 */,
+  0x0A /* Reg8/RegMem8 */,     0x0B /* Reg32/RegMem32 */,
+  0x0C /* Rax8/imm8 opcode */, 0x0D /* Rax32/imm32 */,
+  0x80, 0x1 /* RegMem8/imm8 */,
+  0x81, 0x1 /* RegMem32/imm32 */, 0x83, 0x1 /* RegMem32/imm8 */),
+ENCODING_MAP(Adc, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
+  0x10 /* RegMem8/Reg8 */,     0x11 /* RegMem32/Reg32 */,
+  0x12 /* Reg8/RegMem8 */,     0x13 /* Reg32/RegMem32 */,
+  0x14 /* Rax8/imm8 opcode */, 0x15 /* Rax32/imm32 */,
+  0x80, 0x2 /* RegMem8/imm8 */,
+  0x81, 0x2 /* RegMem32/imm32 */, 0x83, 0x2 /* RegMem32/imm8 */),
+ENCODING_MAP(Sbb, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
+  0x18 /* RegMem8/Reg8 */,     0x19 /* RegMem32/Reg32 */,
+  0x1A /* Reg8/RegMem8 */,     0x1B /* Reg32/RegMem32 */,
+  0x1C /* Rax8/imm8 opcode */, 0x1D /* Rax32/imm32 */,
+  0x80, 0x3 /* RegMem8/imm8 */,
+  0x81, 0x3 /* RegMem32/imm32 */, 0x83, 0x3 /* RegMem32/imm8 */),
+ENCODING_MAP(And, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x20 /* RegMem8/Reg8 */,     0x21 /* RegMem32/Reg32 */,
+  0x22 /* Reg8/RegMem8 */,     0x23 /* Reg32/RegMem32 */,
+  0x24 /* Rax8/imm8 opcode */, 0x25 /* Rax32/imm32 */,
+  0x80, 0x4 /* RegMem8/imm8 */,
+  0x81, 0x4 /* RegMem32/imm32 */, 0x83, 0x4 /* RegMem32/imm8 */),
+ENCODING_MAP(Sub, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x28 /* RegMem8/Reg8 */,     0x29 /* RegMem32/Reg32 */,
+  0x2A /* Reg8/RegMem8 */,     0x2B /* Reg32/RegMem32 */,
+  0x2C /* Rax8/imm8 opcode */, 0x2D /* Rax32/imm32 */,
+  0x80, 0x5 /* RegMem8/imm8 */,
+  0x81, 0x5 /* RegMem32/imm32 */, 0x83, 0x5 /* RegMem32/imm8 */),
+ENCODING_MAP(Xor, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x30 /* RegMem8/Reg8 */,     0x31 /* RegMem32/Reg32 */,
+  0x32 /* Reg8/RegMem8 */,     0x33 /* Reg32/RegMem32 */,
+  0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */,
+  0x80, 0x6 /* RegMem8/imm8 */,
+  0x81, 0x6 /* RegMem32/imm32 */, 0x83, 0x6 /* RegMem32/imm8 */),
+ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
+  0x38 /* RegMem8/Reg8 */,     0x39 /* RegMem32/Reg32 */,
+  0x3A /* Reg8/RegMem8 */,     0x3B /* Reg32/RegMem32 */,
+  0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */,
+  0x80, 0x7 /* RegMem8/imm8 */,
+  0x81, 0x7 /* RegMem32/imm32 */, 0x83, 0x7 /* RegMem32/imm8 */),
+#undef ENCODING_MAP
+
+  { kX86Imul16RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RRI", "!0r,!1r,!2d" },
+  { kX86Imul16RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RMI", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul16RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
+  { kX86Imul32RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RRI", "!0r,!1r,!2d" },
+  { kX86Imul32RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RMI", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul32RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+  { kX86Imul32RRI8,  kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RRI8", "!0r,!1r,!2d" },
+  { kX86Imul32RMI8,  kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul32RAI8,  kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
+  { kX86Mov8MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x88, 0, 0, 0, 0, 0 }, "Mov8MR", "[!0r+!1d],!2r" },
+  { kX86Mov8AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x88, 0, 0, 0, 0, 0 }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8TR", "fs:[!0d],!1r" },
+  { kX86Mov8RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RR", "!0r,!1r" },
+  { kX86Mov8RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RM", "!0r,[!1r+!2d]" },
+  { kX86Mov8RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov8RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RT", "!0r,fs:[!1d]" },
+  { kX86Mov8RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB0, 0, 0, 0, 0, 1 }, "Mov8RI", "!0r,!1d" },
+  { kX86Mov8MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8MI", "[!0r+!1d],!2d" },
+  { kX86Mov8AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8TI", "fs:[!0d],!1d" },
+
+  { kX86Mov16MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0x66,          0,    0x89, 0, 0, 0, 0, 0 }, "Mov16MR", "[!0r+!1d],!2r" },
+  { kX86Mov16AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0x66,          0,    0x89, 0, 0, 0, 0, 0 }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov16TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0x66, 0x89, 0, 0, 0, 0, 0 }, "Mov16TR", "fs:[!0d],!1r" },
+  { kX86Mov16RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0x66,          0,    0x8B, 0, 0, 0, 0, 0 }, "Mov16RR", "!0r,!1r" },
+  { kX86Mov16RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0x66,          0,    0x8B, 0, 0, 0, 0, 0 }, "Mov16RM", "!0r,[!1r+!2d]" },
+  { kX86Mov16RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0x66,          0,    0x8B, 0, 0, 0, 0, 0 }, "Mov16RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov16RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0x66, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RT", "!0r,fs:[!1d]" },
+  { kX86Mov16RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0x66,          0,    0xB8, 0, 0, 0, 0, 2 }, "Mov16RI", "!0r,!1d" },
+  { kX86Mov16MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0x66,          0,    0xC7, 0, 0, 0, 0, 2 }, "Mov16MI", "[!0r+!1d],!2d" },
+  { kX86Mov16AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0x66,          0,    0xC7, 0, 0, 0, 0, 2 }, "Mov16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2 }, "Mov16TI", "fs:[!0d],!1d" },
+
+  { kX86Mov32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov32MR", "[!0r+!1d],!2r" },
+  { kX86Mov32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32TR", "fs:[!0d],!1r" },
+  { kX86Mov32RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RR", "!0r,!1r" },
+  { kX86Mov32RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RM", "!0r,[!1r+!2d]" },
+  { kX86Mov32RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov32RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RT", "!0r,fs:[!1d]" },
+  { kX86Mov32RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB8, 0, 0, 0, 0, 4 }, "Mov32RI", "!0r,!1d" },
+  { kX86Mov32MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32MI", "[!0r+!1d],!2d" },
+  { kX86Mov32AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" },
+
+  { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+
+#define SHIFT_ENCODING_MAP(opname, modrm_opcode) \
+{ kX86 ## opname ## 8RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 8MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    1 }, #opname "8RC", "!0r,cl" }, \
+{ kX86 ## opname ## 8MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    1 }, #opname "8MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 8AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    1 }, #opname "8AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+  \
+{ kX86 ## opname ## 16RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    1 }, #opname "16RC", "!0r,cl" }, \
+{ kX86 ## opname ## 16MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    1 }, #opname "16MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 16AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    1 }, #opname "16AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+  \
+{ kX86 ## opname ## 32RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32RC", "!0r,cl" }, \
+{ kX86 ## opname ## 32MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 32AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" }
+
+  SHIFT_ENCODING_MAP(Rol, 0x0),
+  SHIFT_ENCODING_MAP(Ror, 0x1),
+  SHIFT_ENCODING_MAP(Rcl, 0x2),
+  SHIFT_ENCODING_MAP(Rcr, 0x3),
+  SHIFT_ENCODING_MAP(Sal, 0x4),
+  SHIFT_ENCODING_MAP(Shr, 0x5),
+  SHIFT_ENCODING_MAP(Sar, 0x7),
+#undef SHIFT_ENCODING_MAP
+
+  { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0}, "Cmc", "" },
+
+  { kX86Test8RI,  kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8RI", "!0r,!1d" },
+  { kX86Test8MI,  kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8MI", "[!0r+!1d],!2d" },
+  { kX86Test8AI,  kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test16RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16RI", "!0r,!1d" },
+  { kX86Test16MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16MI", "[!0r+!1d],!2d" },
+  { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test32RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32RI", "!0r,!1d" },
+  { kX86Test32MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32MI", "[!0r+!1d],!2d" },
+  { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test32RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { 0,    0, 0x85, 0, 0, 0, 0, 0}, "Test32RR", "!0r,!1r" },
+
+#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
+                           reg, reg_kind, reg_flags, \
+                           mem, mem_kind, mem_flags, \
+                           arr, arr_kind, arr_flags, imm, \
+                           b_flags, hw_flags, w_flags, \
+                           b_format, hw_format, w_format) \
+{ kX86 ## opname ## 8 ## reg,  reg_kind,                      reg_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #reg, #b_format "!0r" }, \
+{ kX86 ## opname ## 8 ## mem,  mem_kind, IS_LOAD | is_store | mem_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #mem, #b_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 8 ## arr,  arr_kind, IS_LOAD | is_store | arr_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #arr, #b_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 16 ## reg, reg_kind,                      reg_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #reg, #hw_format "!0r" }, \
+{ kX86 ## opname ## 16 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #mem, #hw_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #arr, #hw_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 32 ## reg, reg_kind,                      reg_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #reg, #w_format "!0r" }, \
+{ kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #mem, #w_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, #w_format "[!0r+!1r<<!2d+!3d]" }
+
+  UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0,           R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
+  UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
+
+  UNARY_ENCODING_MAP(Mul,     0x4, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA,  REG_DEFAD_USEA,  "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
+  UNARY_ENCODING_MAP(Imul,    0x5, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA,  REG_DEFAD_USEA,  "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
+  UNARY_ENCODING_MAP(Divmod,  0x6, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
+  UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
+#undef UNARY_ENCODING_MAP
+
+#define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
+{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE01,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
+{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
+
+  EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0),
+  { kX86MovsdMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdMR", "[!0r+!1d],!2r" },
+  { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  EXT_0F_ENCODING_MAP(Movss, 0xF3, 0x10, REG_DEF0),
+  { kX86MovssMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssMR", "[!0r+!1d],!2r" },
+  { kX86MovssAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  EXT_0F_ENCODING_MAP(Cvtsi2sd,  0xF2, 0x2A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtsi2ss,  0xF3, 0x2A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvttsd2si, 0xF2, 0x2C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvttss2si, 0xF3, 0x2C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtsd2si,  0xF2, 0x2D, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtss2si,  0xF3, 0x2D, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Ucomisd,   0x66, 0x2E, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Ucomiss,   0x00, 0x2E, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Comisd,    0x66, 0x2F, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Comiss,    0x00, 0x2F, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Orps,      0x00, 0x56, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Xorps,     0x00, 0x57, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Addsd,     0xF2, 0x58, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Addss,     0xF3, 0x58, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Mulsd,     0xF2, 0x59, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Mulss,     0xF3, 0x59, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtsd2ss,  0xF2, 0x5A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtss2sd,  0xF3, 0x5A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Subsd,     0xF2, 0x5C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Subss,     0xF3, 0x5C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Divsd,     0xF2, 0x5E, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Divss,     0xF3, 0x5E, REG_DEF0),
+
+  { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1 }, "PsrlqRI", "!0r,!1d" },
+  { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" },
+
+  EXT_0F_ENCODING_MAP(Movdxr,    0x66, 0x6E, REG_DEF0),
+  { kX86MovdrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0   | REG_USE01,  { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxRR", "!0r,!1r" },
+  { kX86MovdrxMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxMR", "[!0r+!1d],!2r" },
+  { kX86MovdrxAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  { kX86Set8R, kRegCond,              IS_BINARY_OP   | REG_DEF0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8R", "!1c !0r" },
+  { kX86Set8M, kMemCond,   IS_STORE | IS_TERTIARY_OP | REG_USE0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8M", "!2c [!0r+!1d]" },
+  { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP     | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" },
+
+  // TODO: load/store?
+  // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly.
+  { kX86Mfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0 }, "Mfence", "" },
+
+  EXT_0F_ENCODING_MAP(Imul16,  0x66, 0xAF, REG_DEF0 | SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Imul32,  0x00, 0xAF, REG_DEF0 | SETS_CCODES),
+
+  { kX86CmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "!0r,!1r" },
+  { kX86CmpxchgMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1d],!2r" },
+  { kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86LockCmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "!0r,!1r" },
+  { kX86LockCmpxchgMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" },
+  { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  EXT_0F_ENCODING_MAP(Movzx8,  0x00, 0xB6, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Movsx8,  0x00, 0xBE, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0),
+#undef EXT_0F_ENCODING_MAP
+
+  { kX86Jcc8,  kJcc,  IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0,             0, 0x70, 0,    0, 0, 0, 0 }, "Jcc8",  "!1c !0t" },
+  { kX86Jcc32, kJcc,  IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0,             0, 0x0F, 0x80, 0, 0, 0, 0 }, "Jcc32", "!1c !0t" },
+  { kX86Jmp8,  kJmp,  IS_UNARY_OP  | IS_BRANCH | NEEDS_FIXUP,               { 0,             0, 0xEB, 0,    0, 0, 0, 0 }, "Jmp8",  "!0t" },
+  { kX86Jmp32, kJmp,  IS_UNARY_OP  | IS_BRANCH | NEEDS_FIXUP,               { 0,             0, 0xE9, 0,    0, 0, 0, 0 }, "Jmp32", "!0t" },
+  { kX86JmpR,  kJmp,  IS_UNARY_OP  | IS_BRANCH | REG_USE0,                  { 0,             0, 0xFF, 0,    0, 4, 0, 0 }, "JmpR",  "!0r" },
+  { kX86CallR, kCall, IS_UNARY_OP  | IS_BRANCH | REG_USE0,                  { 0,             0, 0xE8, 0,    0, 0, 0, 0 }, "CallR", "!0r" },
+  { kX86CallM, kCall, IS_BINARY_OP | IS_BRANCH | IS_LOAD | REG_USE0,        { 0,             0, 0xFF, 0,    0, 2, 0, 0 }, "CallM", "[!0r+!1d]" },
+  { kX86CallA, kCall, IS_QUAD_OP   | IS_BRANCH | IS_LOAD | REG_USE01,       { 0,             0, 0xFF, 0,    0, 2, 0, 0 }, "CallA", "[!0r+!1r<<!2d+!3d]" },
+  { kX86CallT, kCall, IS_UNARY_OP  | IS_BRANCH | IS_LOAD,                   { THREAD_PREFIX, 0, 0xFF, 0,    0, 2, 0, 0 }, "CallT", "fs:[!0d]" },
+  { kX86Ret,   kNullary, NO_OPERAND | IS_BRANCH,                            { 0,             0, 0xC3, 0,    0, 0, 0, 0 }, "Ret", "" },
+
+  { kX86StartOfMethod, kMacro,  IS_UNARY_OP | SETS_CCODES,             { 0, 0, 0,    0, 0, 0, 0, 0 }, "StartOfMethod", "!0r" },
+  { kX86PcRelLoadRA,   kPcRel,  IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "PcRelLoadRA",   "!0r,[!1r+!2r<<!3d+!4p]" },
+  { kX86PcRelAdr,      kPcRel,  IS_LOAD | IS_BINARY_OP | REG_DEF0,     { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr",      "!0r,!1d" },
+};
+
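+// Computes the number of bytes the encoded instruction will occupy, given the
+// base register and displacement of its memory operand and whether a SIB byte
+// is required.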
+static size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib) {
+  size_t size = 0;
+  if (entry->skeleton.prefix1 > 0) {
+    ++size;
+    if (entry->skeleton.prefix2 > 0) {
+      ++size;
+    }
+  }
+  ++size;  // opcode
+  if (entry->skeleton.opcode == 0x0F) {
+    ++size;
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      ++size;
+    }
+  }
+  ++size;  // modrm
+  if (has_sib || base == rX86_SP) {
+    // SP requires a SIB byte.
+    ++size;
+  }
+  if (displacement != 0 || base == rBP) {
+    // BP requires an explicit displacement, even when it's 0.
+    if (entry->opcode != kX86Lea32RA) {
+      DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name;
+    }
+    size += IS_SIMM8(displacement) ? 1 : 4;
+  }
+  size += entry->skeleton.immediate_bytes;
+  return size;
+}
+
+int X86Mir2Lir::GetInsnSize(LIR* lir) {
+  const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode];
+  switch (entry->kind) {
+    case kData:
+      return 4;  // 4 bytes of data
+    case kNop:
+      return lir->operands[0];  // length of nop is sole operand
+    case kNullary:
+      return 1;  // 1 byte of opcode
+    case kReg:  // lir operands - 0: reg
+      return ComputeSize(entry, 0, 0, false);
+    case kMem:  // lir operands - 0: base, 1: disp
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kThreadReg:  // lir operands - 0: disp, 1: reg
+      return ComputeSize(entry, 0, lir->operands[0], false);
+    case kRegReg:
+      return ComputeSize(entry, 0, 0, false);
+    case kRegRegStore:
+      return ComputeSize(entry, 0, 0, false);
+    case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
+      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+    case kRegArray:   // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+    case kRegThread:  // lir operands - 0: reg, 1: disp
+      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
+    case kRegImm: {  // lir operands - 0: reg, 1: immediate
+      size_t size = ComputeSize(entry, 0, 0, false);
+      if (entry->skeleton.ax_opcode == 0) {
+        return size;
+      } else {
+        // AX opcodes don't require the modrm byte.
+        int reg = lir->operands[0];
+        return size - (reg == rAX ? 1 : 0);
+      }
+    }
+    case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kThreadImm:  // lir operands - 0: disp, 1: imm
+      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
+    case kRegRegImm:  // lir operands - 0: reg, 1: reg, 2: imm
+      return ComputeSize(entry, 0, 0, false);
+    case kRegMemImm:  // lir operands - 0: reg, 1: base, 2: disp, 3: imm
+      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+    case kRegArrayImm:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm
+      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+    case kMovRegImm:  // lir operands - 0: reg, 1: immediate
+      return 1 + entry->skeleton.immediate_bytes;
+    case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
+      // Shift by immediate one has a shorter opcode.
+      return ComputeSize(entry, 0, 0, false) - (lir->operands[1] == 1 ? 1 : 0);
+    case kShiftMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
+      // Shift by immediate one has a shorter opcode.
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false) -
+             (lir->operands[2] == 1 ? 1 : 0);
+    case kShiftArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+      // Shift by immediate one has a shorter opcode.
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true) -
+             (lir->operands[4] == 1 ? 1 : 0);
+    case kShiftRegCl:
+      return ComputeSize(entry, 0, 0, false);
+    case kShiftMemCl:  // lir operands - 0: base, 1: disp, 2: cl
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kShiftArrayCl:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kRegCond:  // lir operands - 0: reg, 1: cond
+      return ComputeSize(entry, 0, 0, false);
+    case kMemCond:  // lir operands - 0: base, 1: disp, 2: cond
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArrayCond:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kJcc:
+      if (lir->opcode == kX86Jcc8) {
+        return 2;  // opcode + rel8
+      } else {
+        DCHECK(lir->opcode == kX86Jcc32);
+        return 6;  // 2 byte opcode + rel32
+      }
+    case kJmp:
+      if (lir->opcode == kX86Jmp8) {
+        return 2;  // opcode + rel8
+      } else if (lir->opcode == kX86Jmp32) {
+        return 5;  // opcode + rel32
+      } else {
+        DCHECK(lir->opcode == kX86JmpR);
+        return 2;  // opcode + modrm
+      }
+    case kCall:
+      switch (lir->opcode) {
+        case kX86CallR: return 2;  // opcode modrm
+        case kX86CallM:  // lir operands - 0: base, 1: disp
+          return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+        case kX86CallA:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
+          return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+        case kX86CallT:  // lir operands - 0: disp
+          return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
+        default:
+          break;
+      }
+      break;
+    case kPcRel:
+      if (entry->opcode == kX86PcRelLoadRA) {
+        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+        return ComputeSize(entry, lir->operands[1], 0x12345678, true);
+      } else {
+        DCHECK(entry->opcode == kX86PcRelAdr);
+        return 5; // opcode with reg + 4 byte immediate
+      }
+    case kMacro:
+      DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
+      return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
+          ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) -
+          (lir->operands[0] == rAX  ? 1 : 0);  // shorter ax encoding
+    default:
+      break;
+  }
+  UNIMPLEMENTED(FATAL) << "Unimplemented size encoding for: " << entry->name;
+  return 0;
+}
+
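+// Chooses the ModRM 'mod' field for a base+displacement operand: 0 for no
+// displacement, 1 for a signed 8-bit displacement, 2 for a 32-bit displacement.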
+static uint8_t ModrmForDisp(int base, int disp) {
+  // BP requires an explicit disp, so do not omit it in the 0 case
+  if (disp == 0 && base != rBP) {
+    return 0;
+  } else if (IS_SIMM8(disp)) {
+    return 1;
+  } else {
+    return 2;
+  }
+}
+
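+// Emits the displacement bytes (one signed byte or four little-endian bytes),
+// mirroring the choice made in ModrmForDisp().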
+void X86Mir2Lir::EmitDisp(int base, int disp) {
+  // BP requires an explicit disp, so do not omit it in the 0 case
+  if (disp == 0 && base != rBP) {
+    return;
+  } else if (IS_SIMM8(disp)) {
+    code_buffer_.push_back(disp & 0xFF);
+  } else {
+    code_buffer_.push_back(disp & 0xFF);
+    code_buffer_.push_back((disp >> 8) & 0xFF);
+    code_buffer_.push_back((disp >> 16) & 0xFF);
+    code_buffer_.push_back((disp >> 24) & 0xFF);
+  }
+}
+
+void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  DCHECK_LT(entry->skeleton.modrm_opcode, 8);
+  DCHECK_LT(base, 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
+  code_buffer_.push_back(modrm);
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
+                       uint8_t base, int disp, uint8_t reg) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  DCHECK_LT(base, 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base;
+  code_buffer_.push_back(modrm);
+  if (base == rX86_SP) {
+    // Special SIB for SP base
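+    // (r/m == rX86_SP selects a SIB byte; index == rX86_SP there means "no index".)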
+    code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+  }
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry,
+                       uint8_t reg, uint8_t base, int disp) {
+  // Opcode will flip operands.
+  EmitMemReg(entry, base, disp, reg);
+}
+
+void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+                  int scale, int disp) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP;
+  code_buffer_.push_back(modrm);
+  DCHECK_LT(scale, 4);
+  DCHECK_LT(index, 8);
+  DCHECK_LT(base, 8);
+  uint8_t sib = (scale << 6) | (index << 3) | base;
+  code_buffer_.push_back(sib);
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+                  uint8_t reg) {
+  // Opcode will flip operands.
+  EmitRegArray(entry, reg, base, index, scale, disp);
+}
+
+void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
+  DCHECK_NE(entry->skeleton.prefix1, 0);
+  code_buffer_.push_back(entry->skeleton.prefix1);
+  if (entry->skeleton.prefix2 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg1)) {
+    reg1 = reg1 & X86_FP_REG_MASK;
+  }
+  if (X86_FPREG(reg2)) {
+    reg2 = reg2 & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg1, 8);
+  DCHECK_LT(reg2, 8);
+  uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
+                          uint8_t reg1, uint8_t reg2, int32_t imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg1)) {
+    reg1 = reg1 & X86_FP_REG_MASK;
+  }
+  if (X86_FPREG(reg2)) {
+    reg2 = reg2 & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg1, 8);
+  DCHECK_LT(reg2, 8);
+  uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  switch (entry->skeleton.immediate_bytes) {
+    case 1:
+      DCHECK(IS_SIMM8(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      break;
+    case 2:
+      DCHECK(IS_SIMM16(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      break;
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+                 << ") for instruction: " << entry->name;
+      break;
+  }
+}
+
+void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  if (reg == rAX && entry->skeleton.ax_opcode != 0) {
+    code_buffer_.push_back(entry->skeleton.ax_opcode);
+  } else {
+    code_buffer_.push_back(entry->skeleton.opcode);
+    if (entry->skeleton.opcode == 0x0F) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode1);
+      if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+        code_buffer_.push_back(entry->skeleton.extra_opcode2);
+      } else {
+        DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+      }
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+    if (X86_FPREG(reg)) {
+      reg = reg & X86_FP_REG_MASK;
+    }
+    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+    code_buffer_.push_back(modrm);
+  }
+  switch (entry->skeleton.immediate_bytes) {
+    case 1:
+      DCHECK(IS_SIMM8(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      break;
+    case 2:
+      DCHECK(IS_SIMM16(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      break;
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+          << ") for instruction: " << entry->name;
+      break;
+  }
+}
+
+void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  switch (entry->skeleton.immediate_bytes) {
+    case 1:
+      DCHECK(IS_SIMM8(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      break;
+    case 2:
+      DCHECK(IS_SIMM16(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      break;
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+          << ") for instruction: " << entry->name;
+      break;
+  }
+  DCHECK_EQ(entry->skeleton.ax_opcode, 0);
+}
+
+void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+  DCHECK_LT(reg, 8);
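+  // MOV r32, imm32 uses the short form B8+rd followed by a little-endian
+  // 32-bit immediate, e.g. "mov eax, 1" assembles to B8 01 00 00 00.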
+  code_buffer_.push_back(0xB8 + reg);
+  code_buffer_.push_back(imm & 0xFF);
+  code_buffer_.push_back((imm >> 8) & 0xFF);
+  code_buffer_.push_back((imm >> 16) & 0xFF);
+  code_buffer_.push_back((imm >> 24) & 0xFF);
+}
+
+void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  if (imm != 1) {
+    code_buffer_.push_back(entry->skeleton.opcode);
+  } else {
+    // Shorter encoding for a shift by 1 (no immediate byte needed)
+    code_buffer_.push_back(entry->skeleton.ax_opcode);
+  }
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  if (imm != 1) {
+    DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
+    DCHECK(IS_SIMM8(imm));
+    code_buffer_.push_back(imm & 0xFF);
+  }
+}
+
+void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
+  DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0x0F, entry->skeleton.opcode);
+  code_buffer_.push_back(0x0F);
+  DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
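+  // 0F 90+cc is the SETcc family; the condition is folded into the opcode byte.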
+  code_buffer_.push_back(0x90 | condition);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
+}
+
+void X86Mir2Lir::EmitJmp(const X86EncodingMap* entry, int rel) {
+  if (entry->opcode == kX86Jmp8) {
+    DCHECK(IS_SIMM8(rel));
+    code_buffer_.push_back(0xEB);
+    code_buffer_.push_back(rel & 0xFF);
+  } else if (entry->opcode == kX86Jmp32) {
+    code_buffer_.push_back(0xE9);
+    code_buffer_.push_back(rel & 0xFF);
+    code_buffer_.push_back((rel >> 8) & 0xFF);
+    code_buffer_.push_back((rel >> 16) & 0xFF);
+    code_buffer_.push_back((rel >> 24) & 0xFF);
+  } else {
+    DCHECK(entry->opcode == kX86JmpR);
+    code_buffer_.push_back(entry->skeleton.opcode);
+    uint8_t reg = static_cast<uint8_t>(rel);
+    DCHECK_LT(reg, 8);
+    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+    code_buffer_.push_back(modrm);
+  }
+}
+
+void X86Mir2Lir::EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc) {
+  DCHECK_LT(cc, 16);
+  if (entry->opcode == kX86Jcc8) {
+    DCHECK(IS_SIMM8(rel));
+    code_buffer_.push_back(0x70 | cc);
+    code_buffer_.push_back(rel & 0xFF);
+  } else {
+    DCHECK(entry->opcode == kX86Jcc32);
+    code_buffer_.push_back(0x0F);
+    code_buffer_.push_back(0x80 | cc);
+    code_buffer_.push_back(rel & 0xFF);
+    code_buffer_.push_back((rel >> 8) & 0xFF);
+    code_buffer_.push_back((rel >> 16) & 0xFF);
+    code_buffer_.push_back((rel >> 24) & 0xFF);
+  }
+}
+
+void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
+  code_buffer_.push_back(modrm);
+  if (base == rX86_SP) {
+    // Special SIB for SP base
+    code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+  }
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) {
+  DCHECK_NE(entry->skeleton.prefix1, 0);
+  code_buffer_.push_back(entry->skeleton.prefix1);
+  if (entry->skeleton.prefix2 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, uint8_t reg,
+                      int base_or_table, uint8_t index, int scale, int table_or_disp) {
+  int disp;
+  if (entry->opcode == kX86PcRelLoadRA) {
+    Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp);
+    disp = tab_rec->offset;
+  } else {
+    DCHECK(entry->opcode == kX86PcRelAdr);
+    Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table);
+    disp = tab_rec->offset;
+  }
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg, 8);
+  if (entry->opcode == kX86PcRelLoadRA) {
+    code_buffer_.push_back(entry->skeleton.opcode);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
+    code_buffer_.push_back(modrm);
+    DCHECK_LT(scale, 4);
+    DCHECK_LT(index, 8);
+    DCHECK_LT(base_or_table, 8);
+    uint8_t base = static_cast<uint8_t>(base_or_table);
+    uint8_t sib = (scale << 6) | (index << 3) | base;
+    code_buffer_.push_back(sib);
+    DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+  } else {
+    code_buffer_.push_back(entry->skeleton.opcode + reg);
+  }
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+}
+
+void X86Mir2Lir::EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset) {
+  DCHECK(entry->opcode == kX86StartOfMethod) << entry->name;
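+  // x86 has no direct way to read EIP, so materialize the current PC with a
+  // "call +0" followed by a pop, then subtract this instruction's offset plus
+  // the 5-byte call to recover the start-of-method address.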
+  code_buffer_.push_back(0xE8);  // call +0
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+
+  DCHECK_LT(reg, 8);
+  code_buffer_.push_back(0x58 + reg);  // pop reg
+
+  EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+}
+
+void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
+  UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
+                         << BuildInsnString(entry->fmt, lir, 0);
+  for (int i = 0; i < GetInsnSize(lir); ++i) {
+    code_buffer_.push_back(0xCC);  // fill with the breakpoint opcode - int 3
+  }
+}
+
+/*
+ * Assemble the LIR into binary instruction format.  Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.  In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
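+ * For example, a kX86Jcc8 whose target turns out to be more than 127 bytes
+ * away is widened to kX86Jcc32 and the method is reassembled.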
+ */
+AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
+  LIR *lir;
+  AssemblerStatus res = kSuccess;  // Assume success
+
+  const bool kVerbosePcFixup = false;
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+    if (lir->opcode < 0) {
+      continue;
+    }
+
+    if (lir->flags.is_nop) {
+      continue;
+    }
+
+    if (lir->flags.pcRelFixup) {
+      switch (lir->opcode) {
+        case kX86Jcc8: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          int delta = 0;
+          uintptr_t pc;
+          if (IS_SIMM8(lir->operands[0])) {
+            pc = lir->offset + 2 /* opcode + rel8 */;
+          } else {
+            pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+          }
+          uintptr_t target = target_lir->offset;
+          delta = target - pc;
+          if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
+            if (kVerbosePcFixup) {
+              LOG(INFO) << "Retry for JCC growth at " << lir->offset
+                  << " delta: " << delta << " old delta: " << lir->operands[0];
+            }
+            lir->opcode = kX86Jcc32;
+            SetupResourceMasks(lir);
+            res = kRetryAll;
+          }
+          if (kVerbosePcFixup) {
+            LOG(INFO) << "Source:";
+            DumpLIRInsn(lir, 0);
+            LOG(INFO) << "Target:";
+            DumpLIRInsn(target_lir, 0);
+            LOG(INFO) << "Delta " << delta;
+          }
+          lir->operands[0] = delta;
+          break;
+        }
+        case kX86Jcc32: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+          uintptr_t target = target_lir->offset;
+          int delta = target - pc;
+          if (kVerbosePcFixup) {
+            LOG(INFO) << "Source:";
+            DumpLIRInsn(lir, 0);
+            LOG(INFO) << "Target:";
+            DumpLIRInsn(target_lir, 0);
+            LOG(INFO) << "Delta " << delta;
+          }
+          lir->operands[0] = delta;
+          break;
+        }
+        case kX86Jmp8: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          int delta = 0;
+          uintptr_t pc;
+          if (IS_SIMM8(lir->operands[0])) {
+            pc = lir->offset + 2 /* opcode + rel8 */;
+          } else {
+            pc = lir->offset + 5 /* opcode + rel32 */;
+          }
+          uintptr_t target = target_lir->offset;
+          delta = target - pc;
+          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
+            // Useless branch
+            lir->flags.is_nop = true;
+            if (kVerbosePcFixup) {
+              LOG(INFO) << "Retry for useless branch at " << lir->offset;
+            }
+            res = kRetryAll;
+          } else if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
+            if (kVerbosePcFixup) {
+              LOG(INFO) << "Retry for JMP growth at " << lir->offset;
+            }
+            lir->opcode = kX86Jmp32;
+            SetupResourceMasks(lir);
+            res = kRetryAll;
+          }
+          lir->operands[0] = delta;
+          break;
+        }
+        case kX86Jmp32: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
+          uintptr_t target = target_lir->offset;
+          int delta = target - pc;
+          lir->operands[0] = delta;
+          break;
+        }
+        default:
+          break;
+      }
+    }
+
+    /*
+     * If one of the pc-relative instructions expanded we'll have
+     * to make another pass.  Don't bother to fully assemble the
+     * instruction.
+     */
+    if (res != kSuccess) {
+      continue;
+    }
+    CHECK_EQ(static_cast<size_t>(lir->offset), code_buffer_.size());
+    const X86EncodingMap *entry = &X86Mir2Lir::EncodingMap[lir->opcode];
+    size_t starting_cbuf_size = code_buffer_.size();
+    switch (entry->kind) {
+      case kData:  // 4 bytes of data
+        code_buffer_.push_back(lir->operands[0] & 0xFF);
+        code_buffer_.push_back((lir->operands[0] >> 8) & 0xFF);
+        code_buffer_.push_back((lir->operands[0] >> 16) & 0xFF);
+        code_buffer_.push_back((lir->operands[0] >> 24) & 0xFF);
+        break;
+      case kNullary:  // 1 byte of opcode
+        DCHECK_EQ(0, entry->skeleton.prefix1);
+        DCHECK_EQ(0, entry->skeleton.prefix2);
+        code_buffer_.push_back(entry->skeleton.opcode);
+        if (entry->skeleton.extra_opcode1 != 0) {
+          code_buffer_.push_back(entry->skeleton.extra_opcode1);
+          if (entry->skeleton.extra_opcode2 != 0) {
+            code_buffer_.push_back(entry->skeleton.extra_opcode2);
+          }
+        } else {
+          DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+        }
+        DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+        DCHECK_EQ(0, entry->skeleton.ax_opcode);
+        DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+        break;
+      case kReg:  // lir operands - 0: reg
+        EmitOpReg(entry, lir->operands[0]);
+        break;
+      case kMem:  // lir operands - 0: base, 1: disp
+        EmitOpMem(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
+        EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        break;
+      case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+        EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                     lir->operands[3], lir->operands[4]);
+        break;
+      case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
+        EmitRegMem(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        break;
+      case kRegArray:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+        EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                     lir->operands[3], lir->operands[4]);
+        break;
+      case kRegThread:  // lir operands - 0: reg, 1: disp
+        EmitRegThread(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kRegReg:  // lir operands - 0: reg1, 1: reg2
+        EmitRegReg(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
+        EmitRegReg(entry, lir->operands[1], lir->operands[0]);
+        break;
+      case kRegRegImm:
+        EmitRegRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        break;
+      case kRegImm:  // lir operands - 0: reg, 1: immediate
+        EmitRegImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kThreadImm:  // lir operands - 0: disp, 1: immediate
+        EmitThreadImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kMovRegImm:  // lir operands - 0: reg, 1: immediate
+        EmitMovRegImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
+        EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kShiftRegCl: // lir operands - 0: reg, 1: cl
+        EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kRegCond:  // lir operands - 0: reg, 1: condition
+        EmitRegCond(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kJmp:  // lir operands - 0: rel
+        EmitJmp(entry, lir->operands[0]);
+        break;
+      case kJcc:  // lir operands - 0: rel, 1: CC, target assigned
+        EmitJcc(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kCall:
+        switch (entry->opcode) {
+          case kX86CallM:  // lir operands - 0: base, 1: disp
+            EmitCallMem(entry, lir->operands[0], lir->operands[1]);
+            break;
+          case kX86CallT:  // lir operands - 0: disp
+            EmitCallThread(entry, lir->operands[0]);
+            break;
+          default:
+            EmitUnimplemented(entry, lir);
+            break;
+        }
+        break;
+      case kPcRel:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+        EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                  lir->operands[3], lir->operands[4]);
+        break;
+      case kMacro:
+        EmitMacro(entry, lir->operands[0], lir->offset);
+        break;
+      default:
+        EmitUnimplemented(entry, lir);
+        break;
+    }
+    CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
+             code_buffer_.size() - starting_cbuf_size)
+        << "Instruction size mismatch for entry: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
+  }
+  return res;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
new file mode 100644
index 0000000..d60be72
--- /dev/null
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case)
+{
+  // TODO
+}
+
+/*
+ * The sparse table in the literal pool is an array of <key,displacement>
+ * pairs.
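+ * Rather than emitting a table, the x86 implementation below simply expands
+ * the switch into a sequence of compare-and-branch instructions, one per key.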
+ */
+void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpSparseSwitchTable(table);
+  }
+  int entries = table[1];
+  const int* keys = reinterpret_cast<const int*>(&table[2]);
+  const int* targets = &keys[entries];
+  rl_src = LoadValue(rl_src, kCoreReg);
+  for (int i = 0; i < entries; i++) {
+    int key = keys[i];
+    BasicBlock* case_block =
+        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+    OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
+                   &block_label_list_[case_block->id]);
+  }
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * mov  r_val, ..
+ * call 0
+ * pop  r_start_of_method
+ * sub  r_start_of_method, ..
+ * mov  r_key_reg, r_val
+ * sub  r_key_reg, low_key
+ * cmp  r_key_reg, size-1  ; bound check
+ * ja   done
+ * mov  r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
+ * add  r_start_of_method, r_disp
+ * jmp  r_start_of_method
+ * done:
+ */
+void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpPackedSwitchTable(table);
+  }
+  // Add the table to the list - we'll process it later
+  SwitchTable *tab_rec =
+      static_cast<SwitchTable *>(arena_->NewMem(sizeof(SwitchTable), true,
+                                                ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int size = table[1];
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
+
+  // Get the switch value
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int start_of_method_reg = AllocTemp();
+  // Materialize a pointer to the switch table
+  //NewLIR0(kX86Bkpt);
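+  // kX86StartOfMethod expands (see EmitMacro) into a call +0 / pop / sub
+  // sequence that leaves the method's runtime start address in the register.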
+  NewLIR1(kX86StartOfMethod, start_of_method_reg);
+  int low_key = s4FromSwitchData(&table[2]);
+  int keyReg;
+  // Remove the bias, if necessary
+  if (low_key == 0) {
+    keyReg = rl_src.low_reg;
+  } else {
+    keyReg = AllocTemp();
+    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+  }
+  // Bounds check - if the key is < 0 or >= size, fall through to the code after the switch
+  OpRegImm(kOpCmp, keyReg, size-1);
+  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+
+  // Load the displacement from the switch table
+  int disp_reg = AllocTemp();
+  NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
+          reinterpret_cast<uintptr_t>(tab_rec));
+  // Add displacement to start of method
+  OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
+  // ...and go!
+  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg);
+  tab_rec->anchor = switch_branch;
+
+  /* branch_over target here */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+}
+
+/*
+ * Array data table format:
+ *  ushort ident = 0x0300   magic value
+ *  ushort width            width of each element in the table
+ *  uint   size             number of elements in the table
+ *  ubyte  data[size*width] table of data values (may contain a single-byte
+ *                          padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
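+ * For example, an int[3] payload (width 4, size 3) occupies 4 + (4*3 + 1)/2 = 10
+ * 16-bit code units.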
+ */
+void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  // Add the table to the list - we'll process it later
+  FillArrayData *tab_rec =
+      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                 ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  uint16_t width = tab_rec->table[1];
+  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+  tab_rec->size = (size * width) + 8;
+
+  fill_array_data_.Insert(tab_rec);
+
+  // Making a call - use explicit registers
+  FlushAllRegs();   /* Everything to home location */
+  LoadValueDirectFixed(rl_src, rX86_ARG0);
+  // Materialize a pointer to the fill data image
+  NewLIR1(kX86StartOfMethod, rX86_ARG2);
+  NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+                          rX86_ARG1, true);
+}
+
+void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rCX);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rCX, opt_flags);
+  // If lock is unheld, try to grab it quickly with compare and exchange
+  // TODO: copy and clear hash state?
+  NewLIR2(kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+  NewLIR2(kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+  NewLIR2(kX86Xor32RR, rAX, rAX);
+  NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+  // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
+  CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+}
+
+void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rAX);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rAX, opt_flags);
+  // If lock is held by the current thread, clear it to quickly release it
+  // TODO: clear hash state?
+  NewLIR2(kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+  NewLIR2(kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+  NewLIR3(kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value());
+  OpRegReg(kOpSub, rCX, rDX);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
+  NewLIR3(kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX);
+  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
+  CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+  branch2->target = NewLIR0(kPseudoTargetLabel);
+}
+
+void X86Mir2Lir::GenMoveException(RegLocation rl_dest)
+{
+  int ex_offset = Thread::ExceptionOffset().Int32Value();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
+  NewLIR2(kX86Mov32TI, ex_offset, 0);
+  StoreValue(rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
+{
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  NewLIR2(kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
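+  // Store a byte of the card table base at card_base[card_no]; the base is
+  // biased so that its low byte is the dirty-card value.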
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
+                   kUnsignedByte);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
+}
+
+void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
+{
+  /*
+   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
+   * allocation mechanism know so it doesn't try to use any of them when
+   * expanding the frame or flushing.  This leaves the utility
+   * code with no spare temps.
+   */
+  LockTemp(rX86_ARG0);
+  LockTemp(rX86_ARG1);
+  LockTemp(rX86_ARG2);
+
+  /* Build frame, return address already on stack */
+  OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
+
+  /*
+   * We can safely skip the stack overflow check if we're
+   * a leaf *and* our frame size < fudge factor.
+   */
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+                (static_cast<size_t>(frame_size_) <
+                Thread::kStackOverflowReservedBytes));
+  NewLIR0(kPseudoMethodEntry);
+  /* Spill core callee saves */
+  SpillCoreRegs();
+  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+  DCHECK_EQ(num_fp_spills_, 0);
+  if (!skip_overflow_check) {
+    // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
+    LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+    OpCondBranch(kCondUlt, tgt);
+    // Remember branch target - will process later
+    throw_launchpads_.Insert(tgt);
+  }
+
+  FlushIns(ArgLocs, rl_method);
+
+  FreeTemp(rX86_ARG0);
+  FreeTemp(rX86_ARG1);
+  FreeTemp(rX86_ARG2);
+}
+
+void X86Mir2Lir::GenExitSequence() {
+  /*
+   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
+   * allocated by the register utilities as temps.
+   */
+  LockTemp(rX86_RET0);
+  LockTemp(rX86_RET1);
+
+  NewLIR0(kPseudoMethodExit);
+  UnSpillCoreRegs();
+  /* Remove frame except for return address */
+  OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
+  NewLIR0(kX86Ret);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
new file mode 100644
index 0000000..3e30141
--- /dev/null
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_X86_CODEGENX86_H_
+#define ART_SRC_COMPILER_DEX_QUICK_X86_CODEGENX86_H_
+
+#include "dex/compiler_internals.h"
+#include "x86_lir.h"
+
+namespace art {
+
+class X86Mir2Lir : public Mir2Lir {
+  public:
+
+    X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+    // Required for target - codegen helpers.
+    bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    int LoadHelper(int offset);
+    LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg);
+    LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    LIR* LoadConstantNoClobber(int r_dest, int value);
+    LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg);
+    void MarkGCCard(int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    bool IsFpReg(int reg);
+    bool SameRegType(int reg1, int reg2);
+    int AllocTypedTemp(bool fp_hint, int reg_class);
+    int AllocTypedTempPair(bool fp_hint, int reg_class);
+    int S2d(int low_reg, int high_reg);
+    int TargetReg(SpecialTargetRegister reg);
+    RegisterInfo* GetRegInfo(int reg);
+    RegLocation GetReturnAlt();
+    RegLocation GetReturnWideAlt();
+    RegLocation LocCReturn();
+    RegLocation LocCReturnDouble();
+    RegLocation LocCReturnFloat();
+    RegLocation LocCReturnWide();
+    uint32_t FpRegMask();
+    uint64_t GetRegMaskCommon(int reg);
+    void AdjustSpillMask();
+    void ClobberCalleeSave();
+    void FlushReg(int reg);
+    void FlushRegWide(int reg1, int reg2);
+    void FreeCallTemps();
+    void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    void LockCallTemps();
+    void MarkPreservedSingle(int v_reg, int reg);
+    void CompilerInitializeRegAlloc();
+
+    // Required for target - miscellaneous.
+    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    void SetupTargetResourceMasks(LIR* lir);
+    const char* GetTargetInstFmt(int opcode);
+    const char* GetTargetInstName(int opcode);
+    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    uint64_t GetPCUseDefEncoding();
+    uint64_t GetTargetInstFlags(int opcode);
+    int GetInsnSize(LIR* lir);
+    bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2);
+    void GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                                RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_dest, int scale);
+    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift);
+    void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    bool GenInlinedSqrt(CallInfo* info);
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenDivZeroCheck(int reg_lo, int reg_hi);
+    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    void GenExitSequence();
+    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    void GenSelect(BasicBlock* bb, MIR* mir);
+    void GenMemBarrier(MemBarrierKind barrier_kind);
+    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    void GenMoveException(RegLocation rl_dest);
+    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+                                               int lit, int first_bit, int second_bit);
+    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+
+    // Single operation generators.
+    LIR* OpUnconditionalBranch(LIR* target);
+    LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    LIR* OpFpRegCopy(int r_dest, int r_src);
+    LIR* OpIT(ConditionCode cond, const char* guide);
+    LIR* OpMem(OpKind op, int rBase, int disp);
+    LIR* OpPcRelLoad(int reg, LIR* target);
+    LIR* OpReg(OpKind op, int r_dest_src);
+    LIR* OpRegCopy(int r_dest, int r_src);
+    LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    LIR* OpTestSuspend(LIR* target);
+    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpVldm(int rBase, int count);
+    LIR* OpVstm(int rBase, int count);
+    void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    void OpTlsCmp(int offset, int val);
+
+    void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
+    void SpillCoreRegs();
+    void UnSpillCoreRegs();
+    static const X86EncodingMap EncodingMap[kX86Last];
+    bool InexpensiveConstantInt(int32_t value);
+    bool InexpensiveConstantFloat(int32_t value);
+    bool InexpensiveConstantLong(int64_t value);
+    bool InexpensiveConstantDouble(int64_t value);
+
+  private:
+    void EmitDisp(int base, int disp);
+    void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
+    void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
+    void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
+    void EmitRegMem(const X86EncodingMap* entry, uint8_t reg, uint8_t base, int disp);
+    void EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+                      int scale, int disp);
+    void EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+                      uint8_t reg);
+    void EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp);
+    void EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2);
+    void EmitRegRegImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
+    void EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitThreadImm(const X86EncodingMap* entry, int disp, int imm);
+    void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl);
+    void EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition);
+    void EmitJmp(const X86EncodingMap* entry, int rel);
+    void EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc);
+    void EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp);
+    void EmitCallThread(const X86EncodingMap* entry, int disp);
+    void EmitPcRel(const X86EncodingMap* entry, uint8_t reg, int base_or_table, uint8_t index,
+                   int scale, int table_or_disp);
+    void EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset);
+    void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_X86_CODEGENX86_H_
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
new file mode 100644
index 0000000..906b4cc
--- /dev/null
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
+                                 RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  X86OpCode op = kX86Nop;
+  RegLocation rl_result;
+
+  /*
+   * Don't attempt to optimize register usage here, since some of these opcodes
+   * (e.g. REM_FLOAT) call out to runtime handlers.
+   */
+  switch (opcode) {
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::ADD_FLOAT:
+      op = kX86AddssRR;
+      break;
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT:
+      op = kX86SubssRR;
+      break;
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT:
+      op = kX86DivssRR;
+      break;
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT:
+      op = kX86MulssRR;
+      break;
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_FLOAT:
+      GenNegFloat(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  int r_dest = rl_result.low_reg;
+  int r_src1 = rl_src1.low_reg;
+  int r_src2 = rl_src2.low_reg;
+  if (r_dest == r_src2) {
+    r_src2 = AllocTempFloat();
+    OpRegCopy(r_src2, r_dest);
+  }
+  OpRegCopy(r_dest, r_src1);
+  NewLIR2(op, r_dest, r_src2);
+  StoreValue(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  X86OpCode op = kX86Nop;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::ADD_DOUBLE:
+      op = kX86AddsdRR;
+      break;
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE:
+      op = kX86SubsdRR;
+      break;
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE:
+      op = kX86DivsdRR;
+      break;
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE:
+      op = kX86MulsdRR;
+      break;
+    case Instruction::REM_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_DOUBLE:
+      GenNegDouble(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
+  DCHECK(rl_src1.wide);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
+  DCHECK(rl_src2.wide);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_result.wide);
+  int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
+  int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+  int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+  if (r_dest == r_src2) {
+    r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
+    OpRegCopy(r_src2, r_dest);
+  }
+  OpRegCopy(r_dest, r_src1);
+  NewLIR2(op, r_dest, r_src2);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src) {
+  RegisterClass rcSrc = kFPReg;
+  X86OpCode op = kX86Nop;
+  int src_reg;
+  RegLocation rl_result;
+  switch (opcode) {
+    case Instruction::INT_TO_FLOAT:
+      rcSrc = kCoreReg;
+      op = kX86Cvtsi2ssRR;
+      break;
+    case Instruction::DOUBLE_TO_FLOAT:
+      rcSrc = kFPReg;
+      op = kX86Cvtsd2ssRR;
+      break;
+    case Instruction::FLOAT_TO_DOUBLE:
+      rcSrc = kFPReg;
+      op = kX86Cvtss2sdRR;
+      break;
+    case Instruction::INT_TO_DOUBLE:
+      rcSrc = kCoreReg;
+      op = kX86Cvtsi2sdRR;
+      break;
+    case Instruction::FLOAT_TO_INT: {
+      rl_src = LoadValue(rl_src, kFPReg);
+      src_reg = rl_src.low_reg;
+      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+      ClobberSReg(rl_dest.s_reg_low);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      int temp_reg = AllocTempFloat();
+
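+      // cvttss2si yields 0x80000000 on NaN or overflow, but Java requires
+      // NaN -> 0 and positive overflow -> 0x7fffffff, so compare against
+      // MAX_INT first and fix up those cases before converting.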
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+      NewLIR2(kX86ComissRR, src_reg, temp_reg);
+      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
+      NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
+      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
+      branch_normal->target = NewLIR0(kPseudoTargetLabel);
+      StoreValue(rl_dest, rl_result);
+      return;
+    }
+    case Instruction::DOUBLE_TO_INT: {
+      rl_src = LoadValueWide(rl_src, kFPReg);
+      src_reg = rl_src.low_reg;
+      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+      ClobberSReg(rl_dest.s_reg_low);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
+
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+      NewLIR2(kX86ComisdRR, src_reg, temp_reg);
+      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
+      NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
+      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
+      branch_normal->target = NewLIR0(kPseudoTargetLabel);
+      StoreValue(rl_dest, rl_result);
+      return;
+    }
+    case Instruction::LONG_TO_DOUBLE:
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      return;
+    case Instruction::LONG_TO_FLOAT:
+      // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      return;
+    case Instruction::FLOAT_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      return;
+    case Instruction::DOUBLE_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      return;
+    default:
+      LOG(INFO) << "Unexpected opcode: " << opcode;
+  }
+  if (rl_src.wide) {
+    rl_src = LoadValueWide(rl_src, rcSrc);
+    src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+  } else {
+    rl_src = LoadValue(rl_src, rcSrc);
+    src_reg = rl_src.low_reg;
+  }
+  if (rl_dest.wide) {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2) {
+  bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
+  bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
+  int src_reg1;
+  int src_reg2;
+  if (single) {
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    src_reg1 = rl_src1.low_reg;
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    src_reg2 = rl_src2.low_reg;
+  } else {
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+  }
+  // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+  ClobberSReg(rl_dest.s_reg_low);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
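+  // ucomiss/ucomisd set ZF/PF/CF as an unsigned compare would, with PF flagging an
+  // unordered (NaN) comparison. For gt-bias the result is pre-loaded with 1 so the PF
+  // branch below can skip the computation and return 1 for NaN; for lt-bias the NaN case
+  // falls through and the SBB at the end yields -1.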
+  LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
+  if (single) {
+    NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
+  } else {
+    NewLIR2(kX86UcomisdRR, src_reg1, src_reg2);
+  }
+  LIR* branch = NULL;
+  if (unordered_gt) {
+    branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+  }
+  // If the result reg can't be byte accessed, use a jump and move instead of a set.
+  if (rl_result.low_reg >= 4) {
+    LIR* branch2 = NULL;
+    if (unordered_gt) {
+      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
+    } else {
+      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
+    }
+    branch2->target = NewLIR0(kPseudoTargetLabel);
+  } else {
+    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+  }
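+  // SBB with an immediate 0 subtracts the carry flag: CF is set when src1 < src2 (or the
+  // compare was unordered, reached only in the lt-bias case), turning the 0 computed
+  // above into -1; otherwise the 0/1 value is kept.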
+  NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
+  if (unordered_gt) {
+    branch->target = NewLIR0(kPseudoTargetLabel);
+  }
+  StoreValue(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double) {
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  LIR* branch = NULL;
+  RegLocation rl_src1;
+  RegLocation rl_src2;
+  if (is_double) {
+    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+            S2d(rl_src2.low_reg, rl_src2.high_reg));
+  } else {
+    rl_src1 = mir_graph_->GetSrc(mir, 0);
+    rl_src2 = mir_graph_->GetSrc(mir, 1);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+  }
+  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
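+  // ucomiss/ucomisd leave the flags as an unsigned compare would, so the signed Dalvik
+  // conditions are remapped to unsigned x86 ones below; PF, set only for an unordered
+  // (NaN) comparison, directs the branch according to the gt/lt bias of the bytecode.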
+  switch (ccode) {
+    case kCondEq:
+      if (!gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = not_taken;
+      }
+      break;
+    case kCondNe:
+      if (!gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = taken;
+      }
+      break;
+    case kCondLt:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = not_taken;
+      }
+      ccode = kCondCs;
+      break;
+    case kCondLe:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = not_taken;
+      }
+      ccode = kCondLs;
+      break;
+    case kCondGt:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = taken;
+      }
+      ccode = kCondHi;
+      break;
+    case kCondGe:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = taken;
+      }
+      ccode = kCondCc;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpCondBranch(ccode, taken);
+}
+
+void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result;
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
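+  // Adding 0x80000000 toggles only bit 31, the IEEE-754 sign bit of the single.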
+  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+  StoreValue(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result;
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
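+  // Toggle bit 31 of the high word (the sign bit of the double); the low word is copied
+  // through unchanged.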
+  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
+  return false;
+}
+
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
new file mode 100644
index 0000000..97d9d2d
--- /dev/null
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mirror/array.h"
+#include "x86_lir.h"
+
+namespace art {
+
+/*
+ * Perform register memory operation.
+ */
+LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code,
+                                int reg1, int base, int offset, ThrowKind kind)
+{
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
+                    current_dalvik_offset_, reg1, base, offset);
+  OpRegMem(kOpCmp, reg1, base, offset);
+  LIR* branch = OpCondBranch(c_code, tgt);
+  // Remember branch target - will process later
+  throw_launchpads_.Insert(tgt);
+  return branch;
+}
+
+/*
+ * Compare two 64-bit values
+ *    x = y     return  0
+ *    x < y     return -1
+ *    x > y     return  1
+ */
+void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) - (r3:r2)
+  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
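+  // Build the -1/0/1 result without branches: SETL captures the signed 64-bit "less than"
+  // of the subtraction, and ORing the two difference halves feeds SETNZ for inequality.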
+  NewLIR2(kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, r2, r2);
+  OpReg(kOpNeg, r2);         // r2 = -r2
+  OpRegReg(kOpOr, r0, r1);   // r0 = high | low - sets ZF
+  NewLIR2(kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, r0, r0);
+  OpRegReg(kOpOr, r0, r2);   // r0 = r0 | r2
+  RegLocation rl_result = LocCReturn();
+  StoreValue(rl_dest, rl_result);
+}
+
+X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
+  switch (cond) {
+    case kCondEq: return kX86CondEq;
+    case kCondNe: return kX86CondNe;
+    case kCondCs: return kX86CondC;
+    case kCondCc: return kX86CondNc;
+    case kCondMi: return kX86CondS;
+    case kCondPl: return kX86CondNs;
+    case kCondVs: return kX86CondO;
+    case kCondVc: return kX86CondNo;
+    case kCondHi: return kX86CondA;
+    case kCondLs: return kX86CondBe;
+    case kCondGe: return kX86CondGe;
+    case kCondLt: return kX86CondL;
+    case kCondGt: return kX86CondG;
+    case kCondLe: return kX86CondLe;
+    case kCondAl:
+    case kCondNv: LOG(FATAL) << "Should not reach here";
+  }
+  return kX86CondO;
+}
+
+LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
+                             LIR* target)
+{
+  NewLIR2(kX86Cmp32RR, src1, src2);
+  X86ConditionCode cc = X86ConditionEncoding(cond);
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+                        cc);
+  branch->target = target;
+  return branch;
+}
+
+LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
+                                int check_value, LIR* target)
+{
+  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
+    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
+    NewLIR2(kX86Test32RR, reg, reg);
+  } else {
+    NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
+  }
+  X86ConditionCode cc = X86ConditionEncoding(cond);
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+  branch->target = target;
+  return branch;
+}
+
+LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
+{
+  if (X86_FPREG(r_dest) || X86_FPREG(r_src))
+    return OpFpRegCopy(r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
+                    r_dest, r_src);
+  if (r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src)
+{
+  LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
+  return res;
+}
+
+void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi,
+                               int src_lo, int src_hi)
+{
+  bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
+  bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
+  assert(X86_FPREG(src_lo) == X86_FPREG(src_hi));
+  assert(X86_FPREG(dest_lo) == X86_FPREG(dest_hi));
+  if (dest_fp) {
+    if (src_fp) {
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+    } else {
+      // TODO: Prevent this from happening in the code. The result is often
+      // unused or could have been loaded more easily from memory.
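+      // Pack the GPR pair into a single XMM register: movd each 32-bit half, shift the
+      // high half left by 32 bits, then OR the halves together in dest_lo.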
+      NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
+      NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
+      NewLIR2(kX86PsllqRI, dest_hi, 32);
+      NewLIR2(kX86OrpsRR, dest_lo, dest_hi);
+    }
+  } else {
+    if (src_fp) {
+      NewLIR2(kX86MovdrxRR, dest_lo, src_lo);
+      NewLIR2(kX86PsrlqRI, src_lo, 32);
+      NewLIR2(kX86MovdrxRR, dest_hi, src_lo);
+    } else {
+      // Handle overlap
+      if (src_hi == dest_lo) {
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+      } else {
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+      }
+    }
+  }
+}
+
+void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect";
+}
+
+void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+  LIR* taken = &block_label_list_[bb->taken->id];
+  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+  // Swap operands and condition code to prevent use of zero flag.
+  if (ccode == kCondLe || ccode == kCondGt) {
+    // Compute (r3:r2) = (r3:r2) - (r1:r0)
+    OpRegReg(kOpSub, r2, r0);  // r2 = r2 - r0
+    OpRegReg(kOpSbc, r3, r1);  // r3 = r3 - r1 - CF
+  } else {
+    // Compute (r1:r0) = (r1:r0) - (r3:r2)
+    OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+    OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  }
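+  // The subtraction above set the flags. Because the operands were swapped for kCondLe
+  // and kCondGt, those conditions are remapped below; Eq/Ne OR the two difference halves
+  // so the zero flag reflects the full 64-bit result.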
+  switch (ccode) {
+    case kCondEq:
+    case kCondNe:
+      OpRegReg(kOpOr, r0, r1);  // r0 = r0 | r1
+      break;
+    case kCondLe:
+      ccode = kCondGe;
+      break;
+    case kCondGt:
+      ccode = kCondLt;
+      break;
+    case kCondLt:
+    case kCondGe:
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpCondBranch(ccode, taken);
+}
+
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo,
+                                     int lit, bool is_div)
+{
+  LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
+  return rl_dest;
+}
+
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo,
+                                  int reg_hi, bool is_div)
+{
+  LOG(FATAL) << "Unexpected use of GenDivRem for x86";
+  return rl_dest;
+}
+
+bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
+{
+  DCHECK_EQ(cu_->instruction_set, kX86);
+  RegLocation rl_src1 = info->args[0];
+  RegLocation rl_src2 = info->args[1];
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+  DCHECK_EQ(cu_->instruction_set, kX86);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
+  branch2->target = NewLIR0(kPseudoTargetLabel);
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
+{
+  NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+}
+
+void X86Mir2Lir::OpTlsCmp(int offset, int val)
+{
+  NewLIR2(kX86Cmp16TI8, offset, val);
+}
+
+bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
+  return false;
+}
+
+LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) {
+  LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
+  return NULL;
+}
+
+LIR* X86Mir2Lir::OpVldm(int rBase, int count)
+{
+  LOG(FATAL) << "Unexpected use of OpVldm for x86";
+  return NULL;
+}
+
+LIR* X86Mir2Lir::OpVstm(int rBase, int count)
+{
+  LOG(FATAL) << "Unexpected use of OpVstm for x86";
+  return NULL;
+}
+
+void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+                                               RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit)
+{
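+  // lit has exactly two bits set (first_bit and second_bit), so
+  // src * lit == (src + (src << (second_bit - first_bit))) << first_bit.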
+  int t_reg = AllocTemp();
+  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  FreeTemp(t_reg);
+  if (first_bit != 0) {
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+  }
+}
+
+void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
+{
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
+  FreeTemp(t_reg);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* X86Mir2Lir::OpTestSuspend(LIR* target)
+{
+  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
+  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+}
+
+// Decrement register and branch on condition
+LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
+{
+  OpRegImm(kOpSub, reg, 1);
+  return OpCmpImmBranch(c_code, reg, 0, target);
+}
+
+bool X86Mir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+  LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
+  return false;
+}
+
+LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide)
+{
+  LOG(FATAL) << "Unexpected use of OpIT in x86";
+  return NULL;
+}
+
+void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenX86Long for x86";
+}
+void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) + (r3:r2)
+  OpRegReg(kOpAdd, r0, r2);  // r0 = r0 + r2
+  OpRegReg(kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) - (r3:r2)
+  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) & (r3:r2)
+  OpRegReg(kOpAnd, r0, r2);  // r0 = r0 & r2
+  OpRegReg(kOpAnd, r1, r3);  // r1 = r1 & r3
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenOrLong(RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) | (r3:r2)
+  OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
+  OpRegReg(kOpOr, r1, r3);  // r1 = r1 | r3
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenXorLong(RegLocation rl_dest,
+                            RegLocation rl_src1, RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
+  OpRegReg(kOpXor, r0, r2);  // r0 = r0 ^ r2
+  OpRegReg(kOpXor, r1, r3);  // r1 = r1 ^ r3
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src, r0, r1);
+  // Compute (r1:r0) = -(r1:r0)
+  OpRegReg(kOpNeg, r0, r0);  // r0 = -r0
+  OpRegImm(kOpAdc, r1, 0);   // r1 = r1 + CF
+  OpRegReg(kOpNeg, r1, r1);  // r1 = -r1
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+  case kOpCmp: opcode = kX86Cmp32RT;  break;
+  case kOpMov: opcode = kX86Mov32RT;  break;
+  default:
+    LOG(FATAL) << "Bad opcode: " << op;
+    break;
+  }
+  NewLIR2(opcode, r_dest, thread_offset);
+}
+
+/*
+ * Generate array load
+ */
+void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+  RegLocation rl_result;
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  if (size == kLong || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+                   len_offset, kThrowArrayBounds);
+  }
+  if ((size == kLong) || (size == kDouble)) {
+    int reg_addr = AllocTemp();
+    OpLea(reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
+    FreeTemp(rl_array.low_reg);
+    FreeTemp(rl_index.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+    LoadBaseIndexedDisp(reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
+                        rl_result.high_reg, size, INVALID_SREG);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
+                        data_offset, rl_result.low_reg, INVALID_REG, size,
+                        INVALID_SREG);
+
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * Generate array store
+ */
+void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+
+  if (size == kLong || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
+  }
+  if ((size == kLong) || (size == kDouble)) {
+    rl_src = LoadValueWide(rl_src, reg_class);
+  } else {
+    rl_src = LoadValue(rl_src, reg_class);
+  }
+  // If the src reg can't be byte accessed, move it to a temp first.
+  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+    int temp = AllocTemp();
+    OpRegCopy(temp, rl_src.low_reg);
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+                         INVALID_REG, size, INVALID_SREG);
+  } else {
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+                         rl_src.high_reg, size, INVALID_SREG);
+  }
+}
+
+/*
+ * Generate array store for an object reference, including the run-time
+ * store check and GC card mark.
+ */
+void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
+
+  int r_value = TargetReg(kArg0);  // Register holding value
+  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
+  int r_array = TargetReg(kArg2);  // Register holding array
+  int r_index = TargetReg(kArg3);  // Register holding index into array
+
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
+
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+
+  // Store of null?
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
+
+  // Get the array's class.
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+                          r_array_class, true);
+  // Redo LoadValues in case they didn't survive the call.
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
+  r_array_class = INVALID_REG;
+
+  // Branch here if value to be stored == null
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  null_value_check->target = target;
+
+  // make an extra temp available for card mark below
+  FreeTemp(TargetReg(kArg1));
+  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+    GenRegMemCheck(kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
+  }
+  StoreBaseIndexedDisp(r_array, r_index, scale,
+                       data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
+  }
+}
+
+void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift)
+{
+  // Default implementation is just to ignore the constant case.
+  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+}
+
+void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  // Default - bail to non-const handler.
+  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
new file mode 100644
index 0000000..c421ef3
--- /dev/null
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+#include <string>
+
+namespace art {
+
+//FIXME: restore "static" when usage uncovered
+/*static*/ int core_regs[] = {
+  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
+#ifdef TARGET_REX_SUPPORT
+  r8, r9, r10, r11, r12, r13, r14, r15
+#endif
+};
+/*static*/ int ReservedRegs[] = {rX86_SP};
+/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
+/*static*/ int FpRegs[] = {
+  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+/*static*/ int fp_temps[] = {
+  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+
+RegLocation X86Mir2Lir::LocCReturn()
+{
+  RegLocation res = X86_LOC_C_RETURN;
+  return res;
+}
+
+RegLocation X86Mir2Lir::LocCReturnWide()
+{
+  RegLocation res = X86_LOC_C_RETURN_WIDE;
+  return res;
+}
+
+RegLocation X86Mir2Lir::LocCReturnFloat()
+{
+  RegLocation res = X86_LOC_C_RETURN_FLOAT;
+  return res;
+}
+
+RegLocation X86Mir2Lir::LocCReturnDouble()
+{
+  RegLocation res = X86_LOC_C_RETURN_DOUBLE;
+  return res;
+}
+
+// Return a target-dependent special register.
+int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+  int res = INVALID_REG;
+  switch (reg) {
+    case kSelf: res = rX86_SELF; break;
+    case kSuspend: res =  rX86_SUSPEND; break;
+    case kLr: res =  rX86_LR; break;
+    case kPc: res =  rX86_PC; break;
+    case kSp: res =  rX86_SP; break;
+    case kArg0: res = rX86_ARG0; break;
+    case kArg1: res = rX86_ARG1; break;
+    case kArg2: res = rX86_ARG2; break;
+    case kArg3: res = rX86_ARG3; break;
+    case kFArg0: res = rX86_FARG0; break;
+    case kFArg1: res = rX86_FARG1; break;
+    case kFArg2: res = rX86_FARG2; break;
+    case kFArg3: res = rX86_FARG3; break;
+    case kRet0: res = rX86_RET0; break;
+    case kRet1: res = rX86_RET1; break;
+    case kInvokeTgt: res = rX86_INVOKE_TGT; break;
+    case kCount: res = rX86_COUNT; break;
+  }
+  return res;
+}
+
+// Create a double from a pair of singles.
+int X86Mir2Lir::S2d(int low_reg, int high_reg)
+{
+  return X86_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t X86Mir2Lir::FpRegMask()
+{
+  return X86_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool X86Mir2Lir::SameRegType(int reg1, int reg2)
+{
+  return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
+uint64_t X86Mir2Lir::GetRegMaskCommon(int reg)
+{
+  uint64_t seed;
+  int shift;
+  int reg_id;
+
+  reg_id = reg & 0xf;
+  /* Double registers in x86 are just a single FP register */
+  seed = 1;
+  /* FP register starts at bit position 16 */
+  shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
+  /* Expand the double register id into single offset */
+  shift += reg_id;
+  return (seed << shift);
+}
+
+uint64_t X86Mir2Lir::GetPCUseDefEncoding()
+{
+  /*
+   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
+   * able to clean up some of the x86/Arm_Mips differences
+   */
+  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
+  return 0ULL;
+}
+
+void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir)
+{
+  DCHECK_EQ(cu_->instruction_set, kX86);
+
+  // X86-specific resource map setup here.
+  uint64_t flags = X86Mir2Lir::EncodingMap[lir->opcode].flags;
+
+  if (flags & REG_USE_SP) {
+    lir->use_mask |= ENCODE_X86_REG_SP;
+  }
+
+  if (flags & REG_DEF_SP) {
+    lir->def_mask |= ENCODE_X86_REG_SP;
+  }
+
+  if (flags & REG_DEFA) {
+    SetupRegMask(&lir->def_mask, rAX);
+  }
+
+  if (flags & REG_DEFD) {
+    SetupRegMask(&lir->def_mask, rDX);
+  }
+  if (flags & REG_USEA) {
+    SetupRegMask(&lir->use_mask, rAX);
+  }
+
+  if (flags & REG_USEC) {
+    SetupRegMask(&lir->use_mask, rCX);
+  }
+
+  if (flags & REG_USED) {
+    SetupRegMask(&lir->use_mask, rDX);
+  }
+}
+
+/* For dumping instructions */
+static const char* x86RegName[] = {
+  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+static const char* x86CondName[] = {
+  "O",
+  "NO",
+  "B/NAE/C",
+  "NB/AE/NC",
+  "Z/EQ",
+  "NZ/NE",
+  "BE/NA",
+  "NBE/A",
+  "S",
+  "NS",
+  "P/PE",
+  "NP/PO",
+  "L/NGE",
+  "NL/GE",
+  "LE/NG",
+  "NLE/G"
+};
+
+/*
+ * Interpret a format string and build a string no longer than size
+ * See format key in Assemble.cc.
+ */
+std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+  std::string buf;
+  size_t i = 0;
+  size_t fmt_len = strlen(fmt);
+  while (i < fmt_len) {
+    if (fmt[i] != '!') {
+      buf += fmt[i];
+      i++;
+    } else {
+      i++;
+      DCHECK_LT(i, fmt_len);
+      char operand_number_ch = fmt[i];
+      i++;
+      if (operand_number_ch == '!') {
+        buf += "!";
+      } else {
+        int operand_number = operand_number_ch - '0';
+        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
+        DCHECK_LT(i, fmt_len);
+        int operand = lir->operands[operand_number];
+        switch (fmt[i]) {
+          case 'c':
+            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName) / sizeof(x86CondName[0]));
+            buf += x86CondName[operand];
+            break;
+          case 'd':
+            buf += StringPrintf("%d", operand);
+            break;
+          case 'p': {
+            SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+            buf += StringPrintf("0x%08x", tab_rec->offset);
+            break;
+          }
+          case 'r':
+            if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
+              int fp_reg = operand & X86_FP_REG_MASK;
+              buf += StringPrintf("xmm%d", fp_reg);
+            } else {
+              DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName) / sizeof(x86RegName[0]));
+              buf += x86RegName[operand];
+            }
+            break;
+          case 't':
+            buf += StringPrintf("0x%08x (L%p)",
+                                reinterpret_cast<uint32_t>(base_addr)
+                                + lir->offset + operand, lir->target);
+            break;
+          default:
+            buf += StringPrintf("DecodeError '%c'", fmt[i]);
+            break;
+        }
+        i++;
+      }
+    }
+  }
+  return buf;
+}
+
+void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
+{
+  char buf[256];
+  buf[0] = 0;
+
+  if (mask == ENCODE_ALL) {
+    strcpy(buf, "all");
+  } else {
+    char num[8];
+    int i;
+
+    for (i = 0; i < kX86RegEnd; i++) {
+      if (mask & (1ULL << i)) {
+        sprintf(num, "%d ", i);
+        strcat(buf, num);
+      }
+    }
+
+    if (mask & ENCODE_CCODE) {
+      strcat(buf, "cc ");
+    }
+    /* Memory bits */
+    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+      sprintf(buf + strlen(buf), "dr%d%s", x86LIR->alias_info & 0xffff,
+              (x86LIR->alias_info & 0x80000000) ? "(+1)" : "");
+    }
+    if (mask & ENCODE_LITERAL) {
+      strcat(buf, "lit ");
+    }
+
+    if (mask & ENCODE_HEAP_REF) {
+      strcat(buf, "heap ");
+    }
+    if (mask & ENCODE_MUST_NOT_ALIAS) {
+      strcat(buf, "noalias ");
+    }
+  }
+  if (buf[0]) {
+    LOG(INFO) << prefix << ": " <<  buf;
+  }
+}
+
+void X86Mir2Lir::AdjustSpillMask() {
+  // x86 has no LR to spill; instead, reserve a slot for the fake return address register.
+  core_spill_mask_ |= (1 << rRET);
+  num_core_spills_++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted.  Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask.  Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
+void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg)
+{
+  UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
+#if 0
+  LOG(FATAL) << "No support yet for promoted FP regs";
+#endif
+}
+
+void X86Mir2Lir::FlushRegWide(int reg1, int reg2)
+{
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
+  DCHECK(info1 && info2 && info1->pair && info2->pair &&
+         (info1->partner == info2->reg) &&
+         (info2->partner == info1->reg));
+  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+    if (!(info1->is_temp && info2->is_temp)) {
+      /* Should not happen.  If it does, there's a problem in eval_loc */
+      LOG(FATAL) << "Long half-temp, half-promoted";
+    }
+
+    info1->dirty = false;
+    info2->dirty = false;
+    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
+      info1 = info2;
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rX86_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+  }
+}
+
+void X86Mir2Lir::FlushReg(int reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  if (info->live && info->dirty) {
+    info->dirty = false;
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rX86_SP, VRegOffset(v_reg), reg, kWord);
+  }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool X86Mir2Lir::IsFpReg(int reg) {
+  return X86_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void X86Mir2Lir::ClobberCalleeSave()
+{
+  Clobber(rAX);
+  Clobber(rCX);
+  Clobber(rDX);
+}
+
+RegLocation X86Mir2Lir::GetReturnWideAlt() {
+  RegLocation res = LocCReturnWide();
+  CHECK(res.low_reg == rAX);
+  CHECK(res.high_reg == rDX);
+  Clobber(rAX);
+  Clobber(rDX);
+  MarkInUse(rAX);
+  MarkInUse(rDX);
+  MarkPair(res.low_reg, res.high_reg);
+  return res;
+}
+
+RegLocation X86Mir2Lir::GetReturnAlt()
+{
+  RegLocation res = LocCReturn();
+  res.low_reg = rDX;
+  Clobber(rDX);
+  MarkInUse(rDX);
+  return res;
+}
+
+X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg)
+{
+  return X86_FPREG(reg) ? &reg_pool_->FPRegs[reg & X86_FP_REG_MASK]
+                    : &reg_pool_->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void X86Mir2Lir::LockCallTemps()
+{
+  LockTemp(rX86_ARG0);
+  LockTemp(rX86_ARG1);
+  LockTemp(rX86_ARG2);
+  LockTemp(rX86_ARG3);
+}
+
+/* To be used when explicitly managing register use */
+void X86Mir2Lir::FreeCallTemps()
+{
+  FreeTemp(rX86_ARG0);
+  FreeTemp(rX86_ARG1);
+  FreeTemp(rX86_ARG2);
+  FreeTemp(rX86_ARG3);
+}
+
+void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+  // TODO: optimize fences
+  NewLIR0(kX86Mfence);
+#endif
+}
+/*
+ * Alloc a pair of core registers, or a double.  Low reg in low byte,
+ * high reg in next byte.
+ */
+int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
+                          int reg_class)
+{
+  int high_reg;
+  int low_reg;
+  int res = 0;
+
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    low_reg = AllocTempDouble();
+    high_reg = low_reg + 1;
+    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+    return res;
+  }
+
+  low_reg = AllocTemp();
+  high_reg = AllocTemp();
+  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+  return res;
+}
+
+int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    return AllocTempFloat();
+  }
+  return AllocTemp();
+}
+
+void X86Mir2Lir::CompilerInitializeRegAlloc() {
+  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs =
+      static_cast<RegisterInfo*>(arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                                                ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs =
+      static_cast<RegisterInfo *>(arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                                                 ArenaAllocator::kAllocRegAlloc));
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
+  // Keep special registers from being allocated
+  for (int i = 0; i < num_reserved; i++) {
+    MarkInUse(ReservedRegs[i]);
+  }
+  // Mark temp regs - all others not in use can be used for promotion
+  for (int i = 0; i < num_temps; i++) {
+    MarkTemp(core_temps[i]);
+  }
+  for (int i = 0; i < num_fp_temps; i++) {
+    MarkTemp(fp_temps[i]);
+  }
+}
+
+void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
+                     RegLocation rl_free)
+{
+  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+      (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+    // No overlap, free both
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
+  }
+}
+
+void X86Mir2Lir::SpillCoreRegs() {
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  // Spill mask not including fake return address register
+  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  int offset = frame_size_ - (4 * num_core_spills_);
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      StoreWordDisp(rX86_SP, offset, reg);
+      offset += 4;
+    }
+  }
+}
+
+void X86Mir2Lir::UnSpillCoreRegs() {
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  // Spill mask not including fake return address register
+  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  int offset = frame_size_ - (4 * num_core_spills_);
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      LoadWordDisp(rX86_SP, offset, reg);
+      offset += 4;
+    }
+  }
+}
+
+bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir)
+{
+  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
+}
+
+X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
+  for (int i = 0; i < kX86Last; i++) {
+    if (X86Mir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+                 << " is wrong: expecting " << i << ", seeing "
+                 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
+    }
+  }
+}
+
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena) {
+  return new X86Mir2Lir(cu, mir_graph, arena);
+}
+
+// Not used in x86
+int X86Mir2Lir::LoadHelper(int offset)
+{
+  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
+  return INVALID_REG;
+}
+
+uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode)
+{
+  return X86Mir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* X86Mir2Lir::GetTargetInstName(int opcode)
+{
+  return X86Mir2Lir::EncodingMap[opcode].name;
+}
+
+const char* X86Mir2Lir::GetTargetInstFmt(int opcode)
+{
+  return X86Mir2Lir::EncodingMap[opcode].fmt;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
new file mode 100644
index 0000000..fb07ff1
--- /dev/null
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the X86 ISA */
+
+LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src)
+{
+  int opcode;
+  /* must be both DOUBLE or both not DOUBLE */
+  DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
+  if (X86_DOUBLEREG(r_dest)) {
+    opcode = kX86MovsdRR;
+  } else {
+    if (X86_SINGLEREG(r_dest)) {
+      if (X86_SINGLEREG(r_src)) {
+        opcode = kX86MovssRR;
+      } else {  // Fpr <- Gpr
+        opcode = kX86MovdxrRR;
+      }
+    } else {  // Gpr <- Fpr
+      DCHECK(X86_SINGLEREG(r_src));
+      opcode = kX86MovdrxRR;
+    }
+  }
+  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  if (r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+bool X86Mir2Lir::InexpensiveConstantInt(int32_t value)
+{
+  return true;
+}
+
+bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value)
+{
+  return false;
+}
+
+bool X86Mir2Lir::InexpensiveConstantLong(int64_t value)
+{
+  return true;
+}
+
+bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value)
+{
+  return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.  If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value)
+{
+  int r_dest_save = r_dest;
+  if (X86_FPREG(r_dest)) {
+    if (value == 0) {
+      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
+    }
+    DCHECK(X86_SINGLEREG(r_dest));
+    r_dest = AllocTemp();
+  }
+
+  LIR *res;
+  if (value == 0) {
+    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
+  } else {
+    // Note, there is no byte immediate form of a 32 bit immediate move.
+    res = NewLIR2(kX86Mov32RI, r_dest, value);
+  }
+
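+  // For an FP destination the constant was materialized in a GPR temp above; movd moves
+  // the 32-bit pattern into the XMM register before the temp is released.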
+  if (X86_FPREG(r_dest_save)) {
+    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
+    FreeTemp(r_dest);
+  }
+
+  return res;
+}
+
+LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target)
+{
+  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ );
+  res->target = target;
+  return res;
+}
+
+LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
+{
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
+                        X86ConditionEncoding(cc));
+  branch->target = target;
+  return branch;
+}
+
+LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src)
+{
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+    case kOpNeg: opcode = kX86Neg32R; break;
+    case kOpNot: opcode = kX86Not32R; break;
+    case kOpBlx: opcode = kX86CallR; break;
+    default:
+      LOG(FATAL) << "Bad case in OpReg " << op;
+  }
+  return NewLIR1(opcode, r_dest_src);
+}
+
+LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value)
+{
+  X86OpCode opcode = kX86Bkpt;
+  bool byte_imm = IS_SIMM8(value);
+  DCHECK(!X86_FPREG(r_dest_src1));
+  switch (op) {
+    case kOpLsl: opcode = kX86Sal32RI; break;
+    case kOpLsr: opcode = kX86Shr32RI; break;
+    case kOpAsr: opcode = kX86Sar32RI; break;
+    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
+    case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
+    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
+    //case kOpSbb: opcode = kX86Sbb32RI; break;
+    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
+    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
+    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
+    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
+    case kOpMov: return LoadConstantNoClobber(r_dest_src1, value);
+    case kOpMul:
+      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
+      return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
+    default:
+      LOG(FATAL) << "Bad case in OpRegImm " << op;
+  }
+  return NewLIR2(opcode, r_dest_src1, value);
+}
+
+LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
+{
+    X86OpCode opcode = kX86Nop;
+    bool src2_must_be_cx = false;
+    switch (op) {
+        // X86 unary opcodes
+      case kOpMvn:
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpNot, r_dest_src1);
+      case kOpNeg:
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpNeg, r_dest_src1);
+        // X86 binary opcodes
+      case kOpSub: opcode = kX86Sub32RR; break;
+      case kOpSbc: opcode = kX86Sbb32RR; break;
+      case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
+      case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
+      case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
+      case kOpMov: opcode = kX86Mov32RR; break;
+      case kOpCmp: opcode = kX86Cmp32RR; break;
+      case kOpAdd: opcode = kX86Add32RR; break;
+      case kOpAdc: opcode = kX86Adc32RR; break;
+      case kOpAnd: opcode = kX86And32RR; break;
+      case kOpOr:  opcode = kX86Or32RR; break;
+      case kOpXor: opcode = kX86Xor32RR; break;
+      case kOp2Byte:
+        // Use shifts instead of a byte operand if the source can't be byte accessed.
+        if (r_src2 >= 4) {
+          NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
+          NewLIR2(kX86Sal32RI, r_dest_src1, 24);
+          return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
+        } else {
+          opcode = kX86Movsx8RR;
+        }
+        break;
+      case kOp2Short: opcode = kX86Movsx16RR; break;
+      case kOp2Char: opcode = kX86Movzx16RR; break;
+      case kOpMul: opcode = kX86Imul32RR; break;
+      default:
+        LOG(FATAL) << "Bad case in OpRegReg " << op;
+        break;
+    }
+    CHECK(!src2_must_be_cx || r_src2 == rCX);
+    return NewLIR2(opcode, r_dest_src1, r_src2);
+}
+
+LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
+              int offset)
+{
+  X86OpCode opcode = kX86Nop;
+  switch (op) {
+      // X86 binary opcodes
+    case kOpSub: opcode = kX86Sub32RM; break;
+    case kOpMov: opcode = kX86Mov32RM; break;
+    case kOpCmp: opcode = kX86Cmp32RM; break;
+    case kOpAdd: opcode = kX86Add32RM; break;
+    case kOpAnd: opcode = kX86And32RM; break;
+    case kOpOr:  opcode = kX86Or32RM; break;
+    case kOpXor: opcode = kX86Xor32RM; break;
+    case kOp2Byte: opcode = kX86Movsx8RM; break;
+    case kOp2Short: opcode = kX86Movsx16RM; break;
+    case kOp2Char: opcode = kX86Movzx16RM; break;
+    case kOpMul:
+    default:
+      LOG(FATAL) << "Bad case in OpRegMem " << op;
+      break;
+  }
+  return NewLIR3(opcode, r_dest, rBase, offset);
+}
+
+LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
+                 int r_src2)
+{
+  if (r_dest != r_src1 && r_dest != r_src2) {
+    if (op == kOpAdd) { // lea special case, except can't encode rbp as base
+      if (r_src1 == r_src2) {
+        OpRegCopy(r_dest, r_src1);
+        return OpRegImm(kOpLsl, r_dest, 1);
+      } else if (r_src1 != rBP) {
+        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
+                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
+      } else {
+        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
+                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
+      }
+    } else {
+      OpRegCopy(r_dest, r_src1);
+      return OpRegReg(op, r_dest, r_src2);
+    }
+  } else if (r_dest == r_src1) {
+    return OpRegReg(op, r_dest, r_src2);
+  } else {  // r_dest == r_src2
+    switch (op) {
+      case kOpSub:  // non-commutative
+        OpReg(kOpNeg, r_dest);
+        op = kOpAdd;
+        break;
+      case kOpSbc:
+      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
+        int t_reg = AllocTemp();
+        OpRegCopy(t_reg, r_src1);
+        OpRegReg(op, t_reg, r_src2);
+        LIR* res = OpRegCopy(r_dest, t_reg);
+        FreeTemp(t_reg);
+        return res;
+      }
+      case kOpAdd:  // commutative
+      case kOpOr:
+      case kOpAdc:
+      case kOpAnd:
+      case kOpXor:
+        break;
+      default:
+        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
+    }
+    return OpRegReg(op, r_dest, r_src1);
+  }
+}
+
+LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
+                 int value)
+{
+  if (op == kOpMul) {
+    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
+    return NewLIR3(opcode, r_dest, r_src, value);
+  } else if (op == kOpAnd) {
+    if (value == 0xFF && r_src < 4) {
+      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
+    } else if (value == 0xFFFF) {
+      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
+    }
+  }
+  if (r_dest != r_src) {
+    if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
+      // TODO: fix bug in LEA encoding when disp == 0
+      return NewLIR5(kX86Lea32RA, r_dest,  r5sib_no_base /* base */,
+                     r_src /* index */, value /* scale */, 0 /* disp */);
+    } else if (op == kOpAdd) { // lea add special case
+      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
+                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
+    }
+    OpRegCopy(r_dest, r_src);
+  }
+  return OpRegImm(op, r_dest, value);
+}
+
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset)
+{
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+    case kOpBlx: opcode = kX86CallT;  break;
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+      break;
+  }
+  return NewLIR1(opcode, thread_offset);
+}
+
+LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp)
+{
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+    case kOpBlx: opcode = kX86CallM;  break;
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+      break;
+  }
+  return NewLIR2(opcode, rBase, disp);
+}
+
+LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
+{
+    int32_t val_lo = Low32Bits(value);
+    int32_t val_hi = High32Bits(value);
+    LIR *res;
+    if (X86_FPREG(r_dest_lo)) {
+      DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
+      if (value == 0) {
+        return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
+      } else {
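+        // Assemble the 64-bit pattern in r_dest_lo: load the low half, materialize the
+        // high half in r_dest_hi, shift it left by 32 bits, then OR the two XMM registers.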
+        if (val_lo == 0) {
+          res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
+        } else {
+          res = LoadConstantNoClobber(r_dest_lo, val_lo);
+        }
+        if (val_hi != 0) {
+          LoadConstantNoClobber(r_dest_hi, val_hi);
+          NewLIR2(kX86PsllqRI, r_dest_hi, 32);
+          NewLIR2(kX86OrpsRR, r_dest_lo, r_dest_hi);
+        }
+      }
+    } else {
+      res = LoadConstantNoClobber(r_dest_lo, val_lo);
+      LoadConstantNoClobber(r_dest_hi, val_hi);
+    }
+    return res;
+}
+
+LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg) {
+  LIR *load = NULL;
+  LIR *load2 = NULL;
+  bool is_array = r_index != INVALID_REG;
+  bool pair = false;
+  bool is64bit = false;
+  X86OpCode opcode = kX86Nop;
+  switch (size) {
+    case kLong:
+    case kDouble:
+      is64bit = true;
+      if (X86_FPREG(r_dest)) {
+        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
+        if (X86_SINGLEREG(r_dest)) {
+          DCHECK(X86_FPREG(r_dest_hi));
+          DCHECK_EQ(r_dest, (r_dest_hi - 1));
+          r_dest = S2d(r_dest, r_dest_hi);
+        }
+        r_dest_hi = r_dest + 1;
+      } else {
+        pair = true;
+        opcode = is_array ? kX86Mov32RA  : kX86Mov32RM;
+      }
+      // TODO: double store is to unaligned address
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kWord:
+    case kSingle:
+      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
+      if (X86_FPREG(r_dest)) {
+        opcode = is_array ? kX86MovssRA : kX86MovssRM;
+        DCHECK(X86_SINGLEREG(r_dest));
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kSignedHalf:
+      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
+      break;
+    case kSignedByte:
+      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
+  }
+
+  if (!is_array) {
+    if (!pair) {
+      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+    } else {
+      if (rBase == r_dest) {
+        load2 = NewLIR3(opcode, r_dest_hi, rBase,
+                        displacement + HIWORD_OFFSET);
+        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+      } else {
+        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+        load2 = NewLIR3(opcode, r_dest_hi, rBase,
+                        displacement + HIWORD_OFFSET);
+      }
+    }
+    if (rBase == rX86_SP) {
+      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                              true /* is_load */, is64bit);
+      if (pair) {
+        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+                                true /* is_load */, is64bit);
+      }
+    }
+  } else {
+    if (!pair) {
+      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+                     displacement + LOWORD_OFFSET);
+    } else {
+      if (rBase == r_dest) {
+        load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+                        displacement + HIWORD_OFFSET);
+        load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+                       displacement + LOWORD_OFFSET);
+      } else {
+        load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+                       displacement + LOWORD_OFFSET);
+        load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+                        displacement + HIWORD_OFFSET);
+      }
+    }
+  }
+
+  return load;
+}
+
+/* Load value from base + scaled index. */
+LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
+                     int r_index, int r_dest, int scale, OpSize size) {
+  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
+                             r_dest, INVALID_REG, size, INVALID_SREG);
+}
+
+LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
+                  int r_dest, OpSize size, int s_reg) {
+  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
+                             r_dest, INVALID_REG, size, s_reg);
+}
+
+LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
+                      int r_dest_lo, int r_dest_hi, int s_reg) {
+  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
+                             r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg) {
+  LIR *store = NULL;
+  LIR *store2 = NULL;
+  bool is_array = r_index != INVALID_REG;
+  bool pair = false;
+  bool is64bit = false;
+  X86OpCode opcode = kX86Nop;
+  switch (size) {
+    case kLong:
+    case kDouble:
+      is64bit = true;
+      if (X86_FPREG(r_src)) {
+        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
+        if (X86_SINGLEREG(r_src)) {
+          DCHECK(X86_FPREG(r_src_hi));
+          DCHECK_EQ(r_src, (r_src_hi - 1));
+          r_src = S2d(r_src, r_src_hi);
+        }
+        r_src_hi = r_src + 1;
+      } else {
+        pair = true;
+        opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
+      }
+      // TODO: double store is to unaligned address
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kWord:
+    case kSingle:
+      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
+      if (X86_FPREG(r_src)) {
+        opcode = is_array ? kX86MovssAR : kX86MovssMR;
+        DCHECK(X86_SINGLEREG(r_src));
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
+  }
+
+  if (!is_array) {
+    if (!pair) {
+      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+    } else {
+      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
+    }
+    if (rBase == rX86_SP) {
+      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                              false /* is_load */, is64bit);
+      if (pair) {
+        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+                                false /* is_load */, is64bit);
+      }
+    }
+  } else {
+    if (!pair) {
+      store = NewLIR5(opcode, rBase, r_index, scale,
+                      displacement + LOWORD_OFFSET, r_src);
+    } else {
+      store = NewLIR5(opcode, rBase, r_index, scale,
+                      displacement + LOWORD_OFFSET, r_src);
+      store2 = NewLIR5(opcode, rBase, r_index, scale,
+                       displacement + HIWORD_OFFSET, r_src_hi);
+    }
+  }
+
+  return store;
+}
+
+/* Store value at base + scaled index. */
+LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+                      int scale, OpSize size)
+{
+  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
+                              r_src, INVALID_REG, size, INVALID_SREG);
+}
+
+LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
+                               int r_src, OpSize size)
+{
+    return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
+                                displacement, r_src, INVALID_REG, size,
+                                INVALID_SREG);
+}
+
+LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
+                                   int r_src_lo, int r_src_hi)
+{
+  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
+                              r_src_lo, r_src_hi, kLong, INVALID_SREG);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
new file mode 100644
index 0000000..600bd03
--- /dev/null
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions. We consider x86, x86-64 and x32 (the 32-bit mode of x86-64),
+ * although we currently only target x86. The ABIs have different conventions and we hope to have a
+ * single convention to simplify code generation. Changing something that is callee save and making
+ * it caller save places a burden on up-calls to save/restore the callee save register; however,
+ * there are few registers that are callee save in the ABI. Changing something that is caller save
+ * and making it callee save places a burden on down-calls to save/restore the callee save register.
+ * For these reasons we aim to match native conventions for caller and callee save. The first 4
+ * registers can be used for byte operations; for this reason they are preferred for temporary
+ * scratch registers.
+ *
+ * General Purpose Registers:
+ *  Native: x86         | x86-64 / x32      | ART
+ *  r0/eax: caller save | caller save       | caller, Method*, scratch, return value
+ *  r1/ecx: caller save | caller save, arg4 | caller, arg1, scratch
+ *  r2/edx: caller save | caller save, arg3 | caller, arg2, scratch, high half of long return
+ *  r3/ebx: callEE save | callEE save       | callER, arg3, scratch
+ *  r4/esp: stack pointer
+ *  r5/ebp: callee save | callee save       | callee, available for dalvik register promotion
+ *  r6/esi: callEE save | callER save, arg2 | callee, available for dalvik register promotion
+ *  r7/edi: callEE save | callER save, arg1 | callee, available for dalvik register promotion
+ *  ---  x86-64/x32 registers
+ *  Native: x86-64 / x32      | ART
+ *  r8:     caller save, arg5 | caller, scratch
+ *  r9:     caller save, arg6 | caller, scratch
+ *  r10:    caller save       | caller, scratch
+ *  r11:    caller save       | caller, scratch
+ *  r12:    callee save       | callee, available for dalvik register promotion
+ *  r13:    callee save       | callee, available for dalvik register promotion
+ *  r14:    callee save       | callee, available for dalvik register promotion
+ *  r15:    callee save       | callee, available for dalvik register promotion
+ *
+ * There is no rSELF; instead, on x86 the fs: segment has Thread::Current as its base address,
+ * whereas on x86-64/x32 gs: holds it.
+ *
+ * For floating point we don't support CPUs without SSE2 (i.e. we require something newer than the PIII):
+ *  Native: x86       | x86-64 / x32     | ART
+ *  XMM0: caller save |caller save, arg1 | caller, float/double return value (except for native x86 code)
+ *  XMM1: caller save |caller save, arg2 | caller, scratch
+ *  XMM2: caller save |caller save, arg3 | caller, scratch
+ *  XMM3: caller save |caller save, arg4 | caller, scratch
+ *  XMM4: caller save |caller save, arg5 | caller, scratch
+ *  XMM5: caller save |caller save, arg6 | caller, scratch
+ *  XMM6: caller save |caller save, arg7 | caller, scratch
+ *  XMM7: caller save |caller save, arg8 | caller, scratch
+ *  ---  x86-64/x32 registers
+ *  XMM8 .. 15: caller save
+ *
+ * X87 is a necessary evil outside of ART code:
+ *  ST0:  x86 float/double native return value, caller save
+ *  ST1 .. ST7: caller save
+ *
+ *  Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1]              |  {Note: resides in caller's frame}
+ * |       .                |
+ * | IN[0]                  |
+ * | caller's Method*       |
+ * +========================+  {Note: start of callee's frame}
+ * | return address         |  {pushed by call}
+ * | spill region           |  {variable sized}
+ * +------------------------+
+ * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1]            |
+ * | V[locals-2]            |
+ * |      .                 |
+ * |      .                 |
+ * | V[1]                   |
+ * | V[0]                   |
+ * +------------------------+
+ * |  0 to 3 words padding  |
+ * +------------------------+
+ * | OUT[outs-1]            |
+ * | OUT[outs-2]            |
+ * |       .                |
+ * | OUT[0]                 |
+ * | cur_method*            | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+// Offset to distinguish FP regs.
+#define X86_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
+// Offset to distinguish the extra regs.
+#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
+// Reg types.
+#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
+#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
+#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
+#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
+#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
+
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but we require both names to be passed so
+ * that asserts can verify the pair is consecutive if significant rework is
+ * done in this area.  Also, it is a good reminder in the calling
+ * code that reg locations always describe doubles as a pair of singles.
+ */
+#define X86_S2D(x,y) ((x) | X86_FP_DOUBLE)
+/* Mask to strip off fp flags */
+#define X86_FP_REG_MASK 0xF
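+// Worked example (illustrative, values follow from the definitions above): fr3 is
+// 3 + X86_FP_REG_OFFSET = 35, so X86_FPREG(fr3) and X86_SINGLEREG(fr3) hold while
+// X86_DOUBLEREG(fr3) does not; the double built from the pair (fr2, fr3) is
+// X86_S2D(fr2, fr3) = fr2 | X86_FP_DOUBLE = 50, for which X86_DOUBLEREG holds and
+// (50 & X86_FP_REG_MASK) == 2 recovers the index of the low single.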
+
+// RegisterLocation templates for return values (rAX, rAX/rDX or XMM0).
+//                               location,     wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg,     s_reg_low
+#define X86_LOC_C_RETURN             {kLocPhysReg, 0,    0,       0,     0,  0,    0,   0,        1,    rAX,    INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_WIDE        {kLocPhysReg, 1,    0,       0,     0,  0,    0,   0,        1,    rAX,    rDX,         INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_FLOAT       {kLocPhysReg, 0,    0,       0,     1,  0,    0,   0,        1,    fr0,    INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_DOUBLE      {kLocPhysReg, 1,    0,       0,     1,  0,    0,   0,        1,    fr0,    fr1,         INVALID_SREG, INVALID_SREG}
+
+enum X86ResourceEncodingPos {
+  kX86GPReg0   = 0,
+  kX86RegSP    = 4,
+  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
+  kX86FPRegEnd   = 32,
+  kX86RegEnd   = kX86FPRegEnd,
+};
+
+#define ENCODE_X86_REG_LIST(N)      (static_cast<uint64_t>(N))
+#define ENCODE_X86_REG_SP           (1ULL << kX86RegSP)
+
+enum X86NativeRegisterPool {
+  r0     = 0,
+  rAX    = r0,
+  r1     = 1,
+  rCX    = r1,
+  r2     = 2,
+  rDX    = r2,
+  r3     = 3,
+  rBX    = r3,
+  r4sp   = 4,
+  rX86_SP    = r4sp,
+  r4sib_no_index = r4sp,
+  r5     = 5,
+  rBP    = r5,
+  r5sib_no_base = r5,
+  r6     = 6,
+  rSI    = r6,
+  r7     = 7,
+  rDI    = r7,
+#ifndef TARGET_REX_SUPPORT
+  rRET   = 8,  // fake return address register for core spill mask.
+#else
+  r8     = 8,
+  r9     = 9,
+  r10    = 10,
+  r11    = 11,
+  r12    = 12,
+  r13    = 13,
+  r14    = 14,
+  r15    = 15,
+  rRET   = 16,  // fake return address register for core spill mask.
+#endif
+  fr0  =  0 + X86_FP_REG_OFFSET,
+  fr1  =  1 + X86_FP_REG_OFFSET,
+  fr2  =  2 + X86_FP_REG_OFFSET,
+  fr3  =  3 + X86_FP_REG_OFFSET,
+  fr4  =  4 + X86_FP_REG_OFFSET,
+  fr5  =  5 + X86_FP_REG_OFFSET,
+  fr6  =  6 + X86_FP_REG_OFFSET,
+  fr7  =  7 + X86_FP_REG_OFFSET,
+  fr8  =  8 + X86_FP_REG_OFFSET,
+  fr9  =  9 + X86_FP_REG_OFFSET,
+  fr10 = 10 + X86_FP_REG_OFFSET,
+  fr11 = 11 + X86_FP_REG_OFFSET,
+  fr12 = 12 + X86_FP_REG_OFFSET,
+  fr13 = 13 + X86_FP_REG_OFFSET,
+  fr14 = 14 + X86_FP_REG_OFFSET,
+  fr15 = 15 + X86_FP_REG_OFFSET,
+};
+
+#define rX86_ARG0 rAX
+#define rX86_ARG1 rCX
+#define rX86_ARG2 rDX
+#define rX86_ARG3 rBX
+#define rX86_FARG0 rAX
+#define rX86_FARG1 rCX
+#define rX86_FARG2 rDX
+#define rX86_FARG3 rBX
+#define rX86_RET0 rAX
+#define rX86_RET1 rDX
+#define rX86_INVOKE_TGT rAX
+#define rX86_LR INVALID_REG
+#define rX86_SUSPEND INVALID_REG
+#define rX86_SELF INVALID_REG
+#define rX86_COUNT rCX
+#define rX86_PC INVALID_REG
+
+/*
+ * The following enum defines the list of X86 instructions supported by the
+ * assembler. Their corresponding EncodingMap positions will be defined in
+ * assemble_x86.cc.
+ */
+enum X86OpCode {
+  kX86First = 0,
+  kX8632BitData = kX86First, // data [31..0].
+  kX86Bkpt,
+  kX86Nop,
+  // Define groups of binary operations
+  // MR - Memory Register  - opcode [base + disp], reg
+  //             - lir operands - 0: base, 1: disp, 2: reg
+  // AR - Array Register   - opcode [base + index * scale + disp], reg
+  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
+  //             - lir operands - 0: disp, 1: reg
+  // RR - Register Register  - opcode reg1, reg2
+  //             - lir operands - 0: reg1, 1: reg2
+  // RM - Register Memory  - opcode reg, [base + disp]
+  //             - lir operands - 0: reg, 1: base, 2: disp
+  // RA - Register Array   - opcode reg, [base + index * scale + disp]
+  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
+  //             - lir operands - 0: reg, 1: disp
+  // RI - Register Immediate - opcode reg, #immediate
+  //             - lir operands - 0: reg, 1: immediate
+  // MI - Memory Immediate   - opcode [base + disp], #immediate
+  //             - lir operands - 0: base, 1: disp, 2: immediate
+  // AI - Array Immediate  - opcode [base + index * scale + disp], #immediate
+  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+  // TI - Thread Immediate - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
+  //             - lir operands - 0: disp, 1: imm
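+  // Illustrative example (follows from the operand layouts above): an MR form such as
+  // kX86Add32MR is built as NewLIR3(kX86Add32MR, base_reg, disp, src_reg) and roughly
+  // assembles to "add [base + disp], src", while the RM form kX86Add32RM, built as
+  // NewLIR3(kX86Add32RM, dest_reg, base_reg, disp), is "add dest, [base + disp]".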
+#define BinaryOpCode(opcode) \
+  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
+  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
+  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
+  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
+  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
+  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
+  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
+  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR,  \
+  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
+  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
+  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
+  BinaryOpCode(kX86Add),
+  BinaryOpCode(kX86Or),
+  BinaryOpCode(kX86Adc),
+  BinaryOpCode(kX86Sbb),
+  BinaryOpCode(kX86And),
+  BinaryOpCode(kX86Sub),
+  BinaryOpCode(kX86Xor),
+  BinaryOpCode(kX86Cmp),
+#undef BinaryOpCode
+  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
+  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
+  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
+  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
+  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
+  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
+  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
+  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
+  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
+  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
+  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
+  kX86Lea32RA,
+  // RC - Register CL - opcode reg, CL
+  //          - lir operands - 0: reg, 1: CL
+  // MC - Memory CL   - opcode [base + disp], CL
+  //          - lir operands - 0: base, 1: disp, 2: CL
+  // AC - Array CL  - opcode [base + index * scale + disp], CL
+  //          - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
+#define BinaryShiftOpCode(opcode) \
+  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
+  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
+  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
+  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
+  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
+  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
+  BinaryShiftOpCode(kX86Rol),
+  BinaryShiftOpCode(kX86Ror),
+  BinaryShiftOpCode(kX86Rcl),
+  BinaryShiftOpCode(kX86Rcr),
+  BinaryShiftOpCode(kX86Sal),
+  BinaryShiftOpCode(kX86Shr),
+  BinaryShiftOpCode(kX86Sar),
+#undef BinaryShiftOpCode
+  kX86Cmc,
+#define UnaryOpcode(opcode, reg, mem, array) \
+  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
+  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
+  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
+  UnaryOpcode(kX86Test, RI, MI, AI),
+  kX86Test32RR,
+  UnaryOpcode(kX86Not, R, M, A),
+  UnaryOpcode(kX86Neg, R, M, A),
+  UnaryOpcode(kX86Mul,  DaR, DaM, DaA),
+  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
+  UnaryOpcode(kX86Divmod,  DaR, DaM, DaA),
+  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+#undef UnaryOpcode
+#define Binary0fOpCode(opcode) \
+  opcode ## RR, opcode ## RM, opcode ## RA
+  Binary0fOpCode(kX86Movsd),
+  kX86MovsdMR,
+  kX86MovsdAR,
+  Binary0fOpCode(kX86Movss),
+  kX86MovssMR,
+  kX86MovssAR,
+  Binary0fOpCode(kX86Cvtsi2sd), // int to double
+  Binary0fOpCode(kX86Cvtsi2ss), // int to float
+  Binary0fOpCode(kX86Cvttsd2si),// truncating double to int
+  Binary0fOpCode(kX86Cvttss2si),// truncating float to int
+  Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
+  Binary0fOpCode(kX86Cvtss2si), // rounding float to int
+  Binary0fOpCode(kX86Ucomisd),  // unordered double compare
+  Binary0fOpCode(kX86Ucomiss),  // unordered float compare
+  Binary0fOpCode(kX86Comisd),   // double compare
+  Binary0fOpCode(kX86Comiss),   // float compare
+  Binary0fOpCode(kX86Orps),     // or of floating point registers
+  Binary0fOpCode(kX86Xorps),    // xor of floating point registers
+  Binary0fOpCode(kX86Addsd),    // double add
+  Binary0fOpCode(kX86Addss),    // float add
+  Binary0fOpCode(kX86Mulsd),    // double multiply
+  Binary0fOpCode(kX86Mulss),    // float multiply
+  Binary0fOpCode(kX86Cvtsd2ss), // double to float
+  Binary0fOpCode(kX86Cvtss2sd), // float to double
+  Binary0fOpCode(kX86Subsd),    // double subtract
+  Binary0fOpCode(kX86Subss),    // float subtract
+  Binary0fOpCode(kX86Divsd),    // double divide
+  Binary0fOpCode(kX86Divss),    // float divide
+  kX86PsrlqRI,                  // right shift of floating point registers
+  kX86PsllqRI,                  // left shift of floating point registers
+  Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
+  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,// move into reg from xmm
+  kX86Set8R, kX86Set8M, kX86Set8A,// set byte depending on condition operand
+  kX86Mfence,                   // memory barrier
+  Binary0fOpCode(kX86Imul16),   // 16bit multiply
+  Binary0fOpCode(kX86Imul32),   // 32bit multiply
+  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,// compare and exchange
+  kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR,// locked compare and exchange
+  Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
+  Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
+  Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
+  Binary0fOpCode(kX86Movsx16),  // sign-extend 16-bit value
+#undef Binary0fOpCode
+  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
+  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
+  kX86JmpR,             // jmp reg; lir operands - 0: reg
+  kX86CallR,            // call reg; lir operands - 0: reg
+  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
+  kX86CallA,            // call [base + index * scale + disp]
+                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
+  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
+  kX86Ret,              // ret; no lir operands
+  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
+                        // lir operands - 0: reg
+  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
+                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
+  kX86Last
+};
+
+/* Instruction assembly field_loc kind */
+enum X86EncodingKind {
+  kData,                                   // Special case for raw data.
+  kNop,                                    // Special case for variable length nop.
+  kNullary,                                // Opcode that takes no arguments.
+  kReg, kMem, kArray,                      // R, M and A instruction kinds.
+  kMemReg, kArrayReg, kThreadReg,          // MR, AR and TR instruction kinds.
+  kRegReg, kRegMem, kRegArray, kRegThread, // RR, RM, RA and RT instruction kinds.
+  kRegRegStore,                            // RR following the store modrm reg-reg encoding rather than the load.
+  kRegImm, kMemImm, kArrayImm, kThreadImm, // RI, MI, AI and TI instruction kinds.
+  kRegRegImm, kRegMemImm, kRegArrayImm,    // RRI, RMI and RAI instruction kinds.
+  kMovRegImm,                              // Shorter form move RI.
+  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
+  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
+  kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
+  kRegCond, kMemCond, kArrayCond,          // R, M, A instruction kinds followed by a condition.
+  kJmp, kJcc, kCall,                       // Branch instruction kinds.
+  kPcRel,                                  // Operation with displacement that is PC relative
+  kMacro,                                  // An instruction composing multiple others
+  kUnimplemented                           // Encoding used when an instruction isn't yet implemented.
+};
+
+/* Struct used to define the EncodingMap positions for each X86 opcode */
+struct X86EncodingMap {
+  X86OpCode opcode;      // e.g. kX86Add32RI
+  X86EncodingKind kind;  // Used to discriminate in the union below
+  uint64_t flags;
+  struct {
+  uint8_t prefix1;       // non-zero => a prefix byte
+  uint8_t prefix2;       // non-zero => a second prefix byte
+  uint8_t opcode;        // 1 byte opcode
+  uint8_t extra_opcode1; // possible extra opcode byte
+  uint8_t extra_opcode2; // possible second extra opcode byte
+  // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
+  // encoding kind
+  uint8_t modrm_opcode;
+  uint8_t ax_opcode;  // non-zero => shorter encoding for AX as a destination
+  uint8_t immediate_bytes; // number of bytes of immediate
+  } skeleton;
+  const char *name;
+  const char* fmt;
+};
+
+
+// FIXME: mem barrier type - what do we do for x86?
+#define kSY 0
+#define kST 0
+
+// Offsets of high and low halves of a 64bit value.
+#define LOWORD_OFFSET 0
+#define HIWORD_OFFSET 4
+
+// Segment override instruction prefix used for quick TLS access to Thread::Current().
+#define THREAD_PREFIX 0x64
+
+#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
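+// Illustrative: IS_SIMM8(127) and IS_SIMM8(-128) hold but IS_SIMM8(128) does not; checks of
+// this kind are what let the assembler pick the shorter *RI8/*MI8/*AI8 immediate forms above.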
+
+extern X86EncodingMap EncodingMap[kX86Last];
+extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
new file mode 100644
index 0000000..4182072
--- /dev/null
+++ b/compiler/dex/ssa_transformation.cc
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dataflow_iterator-inl.h"
+
+#define NOTVISITED (-1)
+
+namespace art {
+
+void MIRGraph::ClearAllVisitedFlags() {
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    bb->visited = false;
+  }
+}
+
+BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
+  if (bb != NULL) {
+    if (bb->visited || bb->hidden) {
+      bb = NULL;
+    }
+  }
+  return bb;
+}
+
+BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb)
+{
+  BasicBlock* res = NeedsVisit(bb->fall_through);
+  if (res == NULL) {
+    res = NeedsVisit(bb->taken);
+    if (res == NULL) {
+      if (bb->successor_block_list.block_list_type != kNotUsed) {
+        GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+        while (true) {
+          SuccessorBlockInfo *sbi = iterator.Next();
+          if (sbi == NULL) break;
+          res = NeedsVisit(sbi->block);
+          if (res != NULL) break;
+        }
+      }
+    }
+  }
+  return res;
+}
+
+void MIRGraph::MarkPreOrder(BasicBlock* block)
+{
+  block->visited = true;
+  /* Enqueue the pre_order block id */
+  dfs_order_->Insert(block->id);
+}
+
+void MIRGraph::RecordDFSOrders(BasicBlock* block)
+{
+  std::vector<BasicBlock*> succ;
+  MarkPreOrder(block);
+  succ.push_back(block);
+  while (!succ.empty()) {
+    BasicBlock* curr = succ.back();
+    BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
+    if (next_successor != NULL) {
+      MarkPreOrder(next_successor);
+      succ.push_back(next_successor);
+      continue;
+    }
+    curr->dfs_id = dfs_post_order_->Size();
+    dfs_post_order_->Insert(curr->id);
+    succ.pop_back();
+  }
+}
+
+/* Compute the DFS pre- and post-orderings of the blocks */
+void MIRGraph::ComputeDFSOrders()
+{
+  /* Initialize or reset the DFS pre_order list */
+  if (dfs_order_ == NULL) {
+    dfs_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsOrder);
+  } else {
+    /* Just reset the used length on the counter */
+    dfs_order_->Reset();
+  }
+
+  /* Initialize or reset the DFS post_order list */
+  if (dfs_post_order_ == NULL) {
+    dfs_post_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsPostOrder);
+  } else {
+    /* Just reset the used length on the counter */
+    dfs_post_order_->Reset();
+  }
+
+  // Reset visited flags from all nodes
+  ClearAllVisitedFlags();
+
+  // Record dfs orders
+  RecordDFSOrders(GetEntryBlock());
+
+  num_reachable_blocks_ = dfs_order_->Size();
+}
+
+/*
+ * Set the bit for this block in the per-Dalvik-register vector to denote
+ * that Dalvik register idx is defined in BasicBlock bb.
+ */
+bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb)
+{
+  if (bb->data_flow_info == NULL) return false;
+
+  ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v);
+  while (true) {
+    int idx = iterator.Next();
+    if (idx == -1) break;
+    /* Block bb defines register idx */
+    def_block_matrix_[idx]->SetBit(bb->id);
+  }
+  return true;
+}
+
+void MIRGraph::ComputeDefBlockMatrix()
+{
+  int num_registers = cu_->num_dalvik_registers;
+  /* Allocate num_dalvik_registers bit vector pointers */
+  def_block_matrix_ = static_cast<ArenaBitVector**>
+      (arena_->NewMem(sizeof(ArenaBitVector *) * num_registers, true,
+                      ArenaAllocator::kAllocDFInfo));
+  int i;
+
+  /* Initialize num_register vectors with num_blocks bits each */
+  for (i = 0; i < num_registers; i++) {
+    def_block_matrix_[i] =
+        new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
+  }
+  AllNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    FindLocalLiveIn(bb);
+  }
+  AllNodesIterator iter2(this, false /* not iterative */);
+  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    FillDefBlockMatrix(bb);
+  }
+
+  /*
+   * Also set the incoming parameters as defs in the entry block.
+   * Only need to handle the parameters for the outer method.
+   */
+  int num_regs = cu_->num_dalvik_registers;
+  int in_reg = num_regs - cu_->num_ins;
+  for (; in_reg < num_regs; in_reg++) {
+    def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
+  }
+}
+
+void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
+  if (dom_post_order_traversal_ == NULL) {
+    // First time - create the array.
+    dom_post_order_traversal_ =
+        new (arena_) GrowableArray<int>(arena_, num_reachable_blocks_,
+                                        kGrowableArrayDomPostOrderTraversal);
+  } else {
+    dom_post_order_traversal_->Reset();
+  }
+  ClearAllVisitedFlags();
+  std::vector<std::pair<BasicBlock*, ArenaBitVector::Iterator*> > work_stack;
+  bb->visited = true;
+  work_stack.push_back(std::make_pair(bb, new (arena_) ArenaBitVector::Iterator(bb->i_dominated)));
+  while (!work_stack.empty()) {
+    std::pair<BasicBlock*, ArenaBitVector::Iterator*> curr = work_stack.back();
+    BasicBlock* curr_bb = curr.first;
+    ArenaBitVector::Iterator* curr_idom_iter = curr.second;
+    int bb_idx = curr_idom_iter->Next();
+    while ((bb_idx != -1) && (NeedsVisit(GetBasicBlock(bb_idx)) == NULL)) {
+      bb_idx = curr_idom_iter->Next();
+    }
+    if (bb_idx != -1) {
+      BasicBlock* new_bb = GetBasicBlock(bb_idx);
+      new_bb->visited = true;
+      work_stack.push_back(
+          std::make_pair(new_bb, new (arena_) ArenaBitVector::Iterator(new_bb->i_dominated)));
+    } else {
+      // no successor/next
+      dom_post_order_traversal_->Insert(curr_bb->id);
+      work_stack.pop_back();
+
+      /* hacky loop detection */
+      if (curr_bb->taken && curr_bb->dominators->IsBitSet(curr_bb->taken->id)) {
+        attributes_ |= METHOD_HAS_LOOP;
+      }
+    }
+  }
+}
+
+void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb,
+                                         const BasicBlock* succ_bb)
+{
+  /*
+   * TODO - evaluate whether phi will ever need to be inserted into exit
+   * blocks.
+   */
+  if (succ_bb->i_dom != dom_bb &&
+      succ_bb->block_type == kDalvikByteCode &&
+      succ_bb->hidden == false) {
+    dom_bb->dom_frontier->SetBit(succ_bb->id);
+  }
+}
+
+/* Worker function to compute the dominance frontier */
+bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb)
+{
+  /* Calculate DF_local */
+  if (bb->taken) {
+    CheckForDominanceFrontier(bb, bb->taken);
+  }
+  if (bb->fall_through) {
+    CheckForDominanceFrontier(bb, bb->fall_through);
+  }
+  if (bb->successor_block_list.block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+    while (true) {
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
+      if (successor_block_info == NULL) break;
+      BasicBlock* succ_bb = successor_block_info->block;
+      CheckForDominanceFrontier(bb, succ_bb);
+    }
+  }
+
+  /* Calculate DF_up */
+  ArenaBitVector::Iterator bv_iterator(bb->i_dominated);
+  while (true) {
+    //TUNING: hot call to BitVectorIteratorNext
+    int dominated_idx = bv_iterator.Next();
+    if (dominated_idx == -1) break;
+    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
+    ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier);
+    while (true) {
+      //TUNING: hot call to BitVectorIteratorNext
+      int df_up_idx = df_iterator.Next();
+      if (df_up_idx == -1) break;
+      BasicBlock* df_up_block = GetBasicBlock(df_up_idx);
+      CheckForDominanceFrontier(bb, df_up_block);
+    }
+  }
+
+  return true;
+}
+
+/* Worker function for initializing domination-related data structures */
+void MIRGraph::InitializeDominationInfo(BasicBlock* bb)
+{
+  int num_total_blocks = GetBasicBlockListCount();
+
+  if (bb->dominators == NULL) {
+    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                 false /* expandable */, kBitMapDominators);
+    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                  false /* expandable */, kBitMapIDominated);
+    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                   false /* expandable */, kBitMapDomFrontier);
+  } else {
+    bb->dominators->ClearAllBits();
+    bb->i_dominated->ClearAllBits();
+    bb->dom_frontier->ClearAllBits();
+  }
+  /* Set all bits in the dominator vector */
+  bb->dominators->SetInitialBits(num_total_blocks);
+
+  return;
+}
+
+/*
+ * Walk through the ordered i_dom list until we reach a common parent.
+ * Given the ordering of i_dom_list, this common parent represents the
+ * last element of the intersection of the dominators of block1 and block2.
+ */
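+// Illustrative example with made-up i_dom_list_ contents: given post-order dfs ids and
+// i_dom_list_ = {2, 2, 4, 4, 4} (the entry block, id 4, maps to itself),
+// FindCommonParent(0, 3) climbs 0 -> 2 -> 4 and 3 -> 4 and returns 4.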
+int MIRGraph::FindCommonParent(int block1, int block2)
+{
+  while (block1 != block2) {
+    while (block1 < block2) {
+      block1 = i_dom_list_[block1];
+      DCHECK_NE(block1, NOTVISITED);
+    }
+    while (block2 < block1) {
+      block2 = i_dom_list_[block2];
+      DCHECK_NE(block2, NOTVISITED);
+    }
+  }
+  return block1;
+}
+
+/* Worker function to compute each block's immediate dominator */
+bool MIRGraph::ComputeblockIDom(BasicBlock* bb)
+{
+  /* Special-case entry block */
+  if (bb == GetEntryBlock()) {
+    return false;
+  }
+
+  /* Iterate through the predecessors */
+  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+
+  /* Find the first processed predecessor */
+  int idom = -1;
+  while (true) {
+    BasicBlock* pred_bb = iter.Next();
+    CHECK(pred_bb != NULL);
+    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
+      idom = pred_bb->dfs_id;
+      break;
+    }
+  }
+
+  /* Scan the rest of the predecessors */
+  while (true) {
+    BasicBlock* pred_bb = iter.Next();
+    if (!pred_bb) break;
+    if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
+      continue;
+    } else {
+      idom = FindCommonParent(pred_bb->dfs_id, idom);
+    }
+  }
+
+  DCHECK_NE(idom, NOTVISITED);
+
+  /* Did something change? */
+  if (i_dom_list_[bb->dfs_id] != idom) {
+    i_dom_list_[bb->dfs_id] = idom;
+    return true;
+  }
+  return false;
+}
+
+/* Worker function to compute each block's dominators */
+bool MIRGraph::ComputeBlockDominators(BasicBlock* bb)
+{
+  if (bb == GetEntryBlock()) {
+    bb->dominators->ClearAllBits();
+  } else {
+    bb->dominators->Copy(bb->i_dom->dominators);
+  }
+  bb->dominators->SetBit(bb->id);
+  return false;
+}
+
+bool MIRGraph::SetDominators(BasicBlock* bb)
+{
+  if (bb != GetEntryBlock()) {
+    int idom_dfs_idx = i_dom_list_[bb->dfs_id];
+    DCHECK_NE(idom_dfs_idx, NOTVISITED);
+    int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
+    BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
+    bb->i_dom = i_dom;
+    /* Add bb to the i_dominated set of the immediate dominator block */
+    i_dom->i_dominated->SetBit(bb->id);
+  }
+  return false;
+}
+
+/* Compute dominators, immediate dominators, and dominance frontiers */
+void MIRGraph::ComputeDominators()
+{
+  int num_reachable_blocks = num_reachable_blocks_;
+  int num_total_blocks = GetBasicBlockListCount();
+
+  /* Initialize domination-related data structures */
+  ReachableNodesIterator iter(this, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    InitializeDominationInfo(bb);
+  }
+
+  /* Initalize & Clear i_dom_list */
+  if (i_dom_list_ == NULL) {
+    i_dom_list_ = static_cast<int*>(arena_->NewMem(sizeof(int) * num_reachable_blocks, false,
+                                                   ArenaAllocator::kAllocDFInfo));
+  }
+  for (int i = 0; i < num_reachable_blocks; i++) {
+    i_dom_list_[i] = NOTVISITED;
+  }
+
+  /* For post-order, the last block is the entry block.  Set its i_dom to itself. */
+  DCHECK_EQ(GetEntryBlock()->dfs_id, num_reachable_blocks-1);
+  i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;
+
+  /* Compute the immediate dominators */
+  ReversePostOrderDfsIterator iter2(this, true /* iterative */);
+  bool change = false;
+  for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+    change = ComputeblockIDom(bb);
+  }
+
+  /* Set the dominator for the root node */
+  GetEntryBlock()->dominators->ClearAllBits();
+  GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);
+
+  if (temp_block_v_ == NULL) {
+    temp_block_v_ = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                false /* expandable */, kBitMapTmpBlockV);
+  } else {
+    temp_block_v_->ClearAllBits();
+  }
+  GetEntryBlock()->i_dom = NULL;
+
+  ReachableNodesIterator iter3(this, false /* not iterative */);
+  for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+    SetDominators(bb);
+  }
+
+  ReversePostOrderDfsIterator iter4(this, false /* not iterative */);
+  for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+    ComputeBlockDominators(bb);
+  }
+
+  // Compute the dominance frontier for each block.
+  ComputeDomPostOrderTraversal(GetEntryBlock());
+  PostOrderDOMIterator iter5(this, false /* not iterative */);
+  for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+    ComputeDominanceFrontier(bb);
+  }
+}
+
+/*
+ * Perform dest |= src1 & ~src2 (add to dest the bits of src1 that are not in src2).
+ * This is probably not general enough to be placed in BitVector.[ch].
+ */
+void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
+                                 const ArenaBitVector* src2)
+{
+  if (dest->GetStorageSize() != src1->GetStorageSize() ||
+      dest->GetStorageSize() != src2->GetStorageSize() ||
+      dest->IsExpandable() != src1->IsExpandable() ||
+      dest->IsExpandable() != src2->IsExpandable()) {
+    LOG(FATAL) << "Incompatible set properties";
+  }
+
+  unsigned int idx;
+  for (idx = 0; idx < dest->GetStorageSize(); idx++) {
+    dest->GetRawStorage()[idx] |= src1->GetRawStorageWord(idx) & ~(src2->GetRawStorageWord(idx));
+  }
+}
+
+/*
+ * Iterate through all successor blocks and propagate up the live-in sets.
+ * The calculated result is used for phi-node pruning - where we only need to
+ * insert a phi node if the variable is live-in to the block.
+ */
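+// Illustrative: if a successor is live-in for {v0, v1} and this block defines v1, only v0 is
+// propagated into this block's live-in set by the ComputeSuccLineIn calls below.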
+bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb)
+{
+  ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_;
+
+  if (bb->data_flow_info == NULL) return false;
+  temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
+  if (bb->taken && bb->taken->data_flow_info)
+    ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
+                      bb->data_flow_info->def_v);
+  if (bb->fall_through && bb->fall_through->data_flow_info)
+    ComputeSuccLineIn(temp_dalvik_register_v, bb->fall_through->data_flow_info->live_in_v,
+                      bb->data_flow_info->def_v);
+  if (bb->successor_block_list.block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+    while (true) {
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
+      if (successor_block_info == NULL) break;
+      BasicBlock* succ_bb = successor_block_info->block;
+      if (succ_bb->data_flow_info) {
+        ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
+                          bb->data_flow_info->def_v);
+      }
+    }
+  }
+  if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
+    bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
+    return true;
+  }
+  return false;
+}
+
+/* Insert phi nodes for each variable at its dominance frontier */
+void MIRGraph::InsertPhiNodes()
+{
+  int dalvik_reg;
+  ArenaBitVector* phi_blocks =
+      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapPhi);
+  ArenaBitVector* tmp_blocks =
+      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapTmpBlocks);
+  ArenaBitVector* input_blocks =
+      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapInputBlocks);
+
+  temp_dalvik_register_v_ =
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
+
+  PostOrderDfsIterator iter(this, true /* iterative */);
+  bool change = false;
+  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+    change = ComputeBlockLiveIns(bb);
+  }
+
+  /* Iterate through each Dalvik register */
+  for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
+    bool change;
+
+    input_blocks->Copy(def_block_matrix_[dalvik_reg]);
+    phi_blocks->ClearAllBits();
+
+    /* Calculate the phi blocks for each Dalvik register */
+    do {
+      change = false;
+      tmp_blocks->ClearAllBits();
+      ArenaBitVector::Iterator iterator(input_blocks);
+
+      while (true) {
+        int idx = iterator.Next();
+        if (idx == -1) break;
+        BasicBlock* def_bb = GetBasicBlock(idx);
+
+        /* Merge the dominance frontier to tmp_blocks */
+        //TUNING: hot call to Union().
+        if (def_bb->dom_frontier != NULL) {
+          tmp_blocks->Union(def_bb->dom_frontier);
+        }
+      }
+      if (!phi_blocks->Equal(tmp_blocks)) {
+        change = true;
+        phi_blocks->Copy(tmp_blocks);
+
+        /*
+         * Iterate through the original blocks plus the new ones in
+         * the dominance frontier.
+         */
+        input_blocks->Copy(phi_blocks);
+        input_blocks->Union(def_block_matrix_[dalvik_reg]);
+      }
+    } while (change);
+
+    /*
+     * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
+     * register is in the live-in set.
+     */
+    ArenaBitVector::Iterator iterator(phi_blocks);
+    while (true) {
+      int idx = iterator.Next();
+      if (idx == -1) break;
+      BasicBlock* phi_bb = GetBasicBlock(idx);
+      /* Variable will be clobbered before being used - no need for phi */
+      if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) continue;
+      MIR *phi =
+          static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocDFInfo));
+      phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
+      phi->dalvikInsn.vA = dalvik_reg;
+      phi->offset = phi_bb->start_offset;
+      phi->m_unit_index = 0; // Arbitrarily assign all Phi nodes to outermost method.
+      PrependMIR(phi_bb, phi);
+    }
+  }
+}
+
+/*
+ * Worker function to insert phi-operands with latest SSA names from
+ * predecessor blocks
+ */
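+// Illustrative: a phi for Dalvik register v1 in a block with predecessors 3 and 5, whose latest
+// SSA names for v1 are v1_2 and v1_7, ends up with uses {v1_2, v1_7} and incoming arcs {3, 5}.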
+bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb)
+{
+  MIR *mir;
+  std::vector<int> uses;
+  std::vector<int> incoming_arc;
+
+  /* Phi nodes are at the beginning of each block */
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
+      return true;
+    int ssa_reg = mir->ssa_rep->defs[0];
+    DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here
+    int v_reg = SRegToVReg(ssa_reg);
+
+    uses.clear();
+    incoming_arc.clear();
+
+    /* Iterate through the predecessors */
+    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+    while (true) {
+      BasicBlock* pred_bb = iter.Next();
+      if (!pred_bb) break;
+      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
+      uses.push_back(ssa_reg);
+      incoming_arc.push_back(pred_bb->id);
+    }
+
+    /* Count the number of SSA registers for a Dalvik register */
+    int num_uses = uses.size();
+    mir->ssa_rep->num_uses = num_uses;
+    mir->ssa_rep->uses =
+        static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
+                                         ArenaAllocator::kAllocDFInfo));
+    mir->ssa_rep->fp_use =
+        static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
+                                          ArenaAllocator::kAllocDFInfo));
+    int* incoming =
+        static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
+                                         ArenaAllocator::kAllocDFInfo));
+    // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
+    mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
+
+    /* Set the uses array for the phi node */
+    int *use_ptr = mir->ssa_rep->uses;
+    for (int i = 0; i < num_uses; i++) {
+      *use_ptr++ = uses[i];
+      *incoming++ = incoming_arc[i];
+    }
+  }
+
+  return true;
+}
+
+void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block)
+{
+
+  if (block->visited || block->hidden) return;
+  block->visited = true;
+
+  /* Process this block */
+  DoSSAConversion(block);
+  int map_size = sizeof(int) * cu_->num_dalvik_registers;
+
+  /* Save SSA map snapshot */
+  int* saved_ssa_map =
+      static_cast<int*>(arena_->NewMem(map_size, false, ArenaAllocator::kAllocDalvikToSSAMap));
+  memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
+
+  if (block->fall_through) {
+    DoDFSPreOrderSSARename(block->fall_through);
+    /* Restore SSA map snapshot */
+    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+  }
+  if (block->taken) {
+    DoDFSPreOrderSSARename(block->taken);
+    /* Restore SSA map snapshot */
+    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+  }
+  if (block->successor_block_list.block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_block_list.blocks);
+    while (true) {
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
+      if (successor_block_info == NULL) break;
+      BasicBlock* succ_bb = successor_block_info->block;
+      DoDFSPreOrderSSARename(succ_bb);
+      /* Restore SSA map snapshot */
+      memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+    }
+  }
+  vreg_to_ssa_map_ = saved_ssa_map;
+  return;
+}
+
+/* Perform SSA transformation for the whole method */
+void MIRGraph::SSATransformation()
+{
+  /* Compute the DFS order */
+  ComputeDFSOrders();
+
+  /* Compute the dominator info */
+  ComputeDominators();
+
+  /* Allocate data structures in preparation for SSA conversion */
+  CompilerInitializeSSAConversion();
+
+  /* Find out the "Dalvik reg def x block" relation */
+  ComputeDefBlockMatrix();
+
+  /* Insert phi nodes to dominance frontiers for all variables */
+  InsertPhiNodes();
+
+  /* Rename register names by local defs and phi nodes */
+  ClearAllVisitedFlags();
+  DoDFSPreOrderSSARename(GetEntryBlock());
+
+  /*
+   * Shared temp bit vector used by each block to count the number of defs
+   * from all the predecessor blocks.
+   */
+  temp_ssa_register_v_ =
+      new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
+
+  /* Insert phi-operands with latest SSA names from predecessor blocks */
+  ReachableNodesIterator iter2(this, false /* not iterative */);
+  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    InsertPhiNodeOperands(bb);
+  }
+
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/3_post_ssa_cfg/", false);
+  }
+  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
+    VerifyDataflow();
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
new file mode 100644
index 0000000..adbda5c
--- /dev/null
+++ b/compiler/dex/vreg_analysis.cc
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dex/dataflow_iterator-inl.h"
+
+namespace art {
+
+bool MIRGraph::SetFp(int index, bool is_fp) {
+  bool change = false;
+  if (is_fp && !reg_location_[index].fp) {
+    reg_location_[index].fp = true;
+    reg_location_[index].defined = true;
+    change = true;
+  }
+  return change;
+}
+
+bool MIRGraph::SetCore(int index, bool is_core) {
+  bool change = false;
+  if (is_core && !reg_location_[index].defined) {
+    reg_location_[index].core = true;
+    reg_location_[index].defined = true;
+    change = true;
+  }
+  return change;
+}
+
+bool MIRGraph::SetRef(int index, bool is_ref) {
+  bool change = false;
+  if (is_ref && !reg_location_[index].defined) {
+    reg_location_[index].ref = true;
+    reg_location_[index].defined = true;
+    change = true;
+  }
+  return change;
+}
+
+bool MIRGraph::SetWide(int index, bool is_wide) {
+  bool change = false;
+  if (is_wide && !reg_location_[index].wide) {
+    reg_location_[index].wide = true;
+    change = true;
+  }
+  return change;
+}
+
+bool MIRGraph::SetHigh(int index, bool is_high) {
+  bool change = false;
+  if (is_high && !reg_location_[index].high_word) {
+    reg_location_[index].high_word = true;
+    change = true;
+  }
+  return change;
+}
+
+/*
+ * Infer types and sizes.  We don't need to track changes in sizes,
+ * as they don't propagate.  We're guaranteed at least one pass through
+ * the CFG.
+ */
+bool MIRGraph::InferTypeAndSize(BasicBlock* bb)
+{
+  MIR *mir;
+  bool changed = false;   // Did anything change?
+
+  if (bb->data_flow_info == NULL) return false;
+  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock)
+    return false;
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    SSARepresentation *ssa_rep = mir->ssa_rep;
+    if (ssa_rep) {
+      int attrs = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+
+      // Handle defs
+      if (attrs & DF_DA) {
+        if (attrs & DF_CORE_A) {
+          changed |= SetCore(ssa_rep->defs[0], true);
+        }
+        if (attrs & DF_REF_A) {
+          changed |= SetRef(ssa_rep->defs[0], true);
+        }
+        if (attrs & DF_A_WIDE) {
+          reg_location_[ssa_rep->defs[0]].wide = true;
+          reg_location_[ssa_rep->defs[1]].wide = true;
+          reg_location_[ssa_rep->defs[1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->defs[0])+1,
+                    SRegToVReg(ssa_rep->defs[1]));
+        }
+      }
+
+      // Handle uses
+      int next = 0;
+      if (attrs & DF_UA) {
+        if (attrs & DF_CORE_A) {
+          changed |= SetCore(ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_REF_A) {
+          changed |= SetRef(ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_A_WIDE) {
+          reg_location_[ssa_rep->uses[next]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
+                    SRegToVReg(ssa_rep->uses[next + 1]));
+          next += 2;
+        } else {
+          next++;
+        }
+      }
+      if (attrs & DF_UB) {
+        if (attrs & DF_CORE_B) {
+          changed |= SetCore(ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_REF_B) {
+          changed |= SetRef(ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_B_WIDE) {
+          reg_location_[ssa_rep->uses[next]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
+                               SRegToVReg(ssa_rep->uses[next + 1]));
+          next += 2;
+        } else {
+          next++;
+        }
+      }
+      if (attrs & DF_UC) {
+        if (attrs & DF_CORE_C) {
+          changed |= SetCore(ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_REF_C) {
+          changed |= SetRef(ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_C_WIDE) {
+          reg_location_[ssa_rep->uses[next]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
+                    SRegToVReg(ssa_rep->uses[next + 1]));
+        }
+      }
+
+      // Special-case return handling
+      if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
+          (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
+          (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
+        switch (cu_->shorty[0]) {
+            case 'I':
+              changed |= SetCore(ssa_rep->uses[0], true);
+              break;
+            case 'J':
+              changed |= SetCore(ssa_rep->uses[0], true);
+              changed |= SetCore(ssa_rep->uses[1], true);
+              reg_location_[ssa_rep->uses[0]].wide = true;
+              reg_location_[ssa_rep->uses[1]].wide = true;
+              reg_location_[ssa_rep->uses[1]].high_word = true;
+              break;
+            case 'F':
+              changed |= SetFp(ssa_rep->uses[0], true);
+              break;
+            case 'D':
+              changed |= SetFp(ssa_rep->uses[0], true);
+              changed |= SetFp(ssa_rep->uses[1], true);
+              reg_location_[ssa_rep->uses[0]].wide = true;
+              reg_location_[ssa_rep->uses[1]].wide = true;
+              reg_location_[ssa_rep->uses[1]].high_word = true;
+              break;
+            case 'L':
+              changed |= SetRef(ssa_rep->uses[0], true);
+              break;
+            default: break;
+        }
+      }
+
+      // Special-case handling for format 35c/3rc invokes
+      Instruction::Code opcode = mir->dalvikInsn.opcode;
+      int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
+          ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+      if ((flags & Instruction::kInvoke) &&
+          (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
+        DCHECK_EQ(next, 0);
+        int target_idx = mir->dalvikInsn.vB;
+        const char* shorty = GetShortyFromTargetIdx(target_idx);
+        // Handle result type if floating point
+        if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
+          MIR* move_result_mir = FindMoveResult(bb, mir);
+          // Result might not be used at all, so no move-result
+          if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
+              Instruction::MOVE_RESULT_OBJECT)) {
+            SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
+            DCHECK(tgt_rep != NULL);
+            tgt_rep->fp_def[0] = true;
+            changed |= SetFp(tgt_rep->defs[0], true);
+            if (shorty[0] == 'D') {
+              tgt_rep->fp_def[1] = true;
+              changed |= SetFp(tgt_rep->defs[1], true);
+            }
+          }
+        }
+        int num_uses = mir->dalvikInsn.vA;
+        // If this is a non-static invoke, mark implicit "this"
+        if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
+            (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
+          reg_location_[ssa_rep->uses[next]].defined = true;
+          reg_location_[ssa_rep->uses[next]].ref = true;
+          next++;
+        }
+        uint32_t cpos = 1;
+        if (strlen(shorty) > 1) {
+          for (int i = next; i < num_uses;) {
+            DCHECK_LT(cpos, strlen(shorty));
+            switch (shorty[cpos++]) {
+              case 'D':
+                ssa_rep->fp_use[i] = true;
+                ssa_rep->fp_use[i+1] = true;
+                reg_location_[ssa_rep->uses[i]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].high_word = true;
+                DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
+                i++;
+                break;
+              case 'J':
+                reg_location_[ssa_rep->uses[i]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].high_word = true;
+                DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
+                changed |= SetCore(ssa_rep->uses[i], true);
+                i++;
+                break;
+              case 'F':
+                ssa_rep->fp_use[i] = true;
+                break;
+              case 'L':
+                changed |= SetRef(ssa_rep->uses[i], true);
+                break;
+              default:
+                changed |= SetCore(ssa_rep->uses[i], true);
+                break;
+            }
+            i++;
+          }
+        }
+      }
+
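+      // Propagate any floating point flags recorded in the SSA use/def vectors
+      // (by the invoke handling above or by earlier passes) into the register locations.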
+      for (int i = 0; ssa_rep->fp_use && i < ssa_rep->num_uses; i++) {
+        if (ssa_rep->fp_use[i]) {
+          changed |= SetFp(ssa_rep->uses[i], true);
+        }
+      }
+      for (int i = 0; ssa_rep->fp_def && i < ssa_rep->num_defs; i++) {
+        if (ssa_rep->fp_def[i]) {
+          changed |= SetFp(ssa_rep->defs[i], true);
+        }
+      }
+      // Special-case handling for moves & Phi
+      if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
+        /*
+         * If any of our inputs or outputs is defined, set all.
+         * Some ugliness related to Phi nodes and wide values.
+         * The Phi set will include all low words or all high
+         * words, so we have to treat them specially.
+         */
+        bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
+                      kMirOpPhi);
+        RegLocation rl_temp = reg_location_[ssa_rep->defs[0]];
+        bool defined_fp = rl_temp.defined && rl_temp.fp;
+        bool defined_core = rl_temp.defined && rl_temp.core;
+        bool defined_ref = rl_temp.defined && rl_temp.ref;
+        bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
+        bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
+        for (int i = 0; i < ssa_rep->num_uses; i++) {
+          rl_temp = reg_location_[ssa_rep->uses[i]];
+          defined_fp |= rl_temp.defined && rl_temp.fp;
+          defined_core |= rl_temp.defined && rl_temp.core;
+          defined_ref |= rl_temp.defined && rl_temp.ref;
+          is_wide |= rl_temp.wide;
+          is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
+        }
+        /*
+         * We don't normally expect to see a Dalvik register definition used both as a
+         * floating point and core value, though technically it could happen with constants.
+         * Until we have proper typing, detect this situation and disable register promotion
+         * (which relies on the distinction between core and fp usages).
+         */
+        if ((defined_fp && (defined_core | defined_ref)) &&
+            ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
+          LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                       << " op at block " << bb->id
+                       << " has both fp and core/ref uses for same def.";
+          cu_->disable_opt |= (1 << kPromoteRegs);
+        }
+        changed |= SetFp(ssa_rep->defs[0], defined_fp);
+        changed |= SetCore(ssa_rep->defs[0], defined_core);
+        changed |= SetRef(ssa_rep->defs[0], defined_ref);
+        changed |= SetWide(ssa_rep->defs[0], is_wide);
+        changed |= SetHigh(ssa_rep->defs[0], is_high);
+        if (attrs & DF_A_WIDE) {
+          changed |= SetWide(ssa_rep->defs[1], true);
+          changed |= SetHigh(ssa_rep->defs[1], true);
+        }
+        for (int i = 0; i < ssa_rep->num_uses; i++) {
+          changed |= SetFp(ssa_rep->uses[i], defined_fp);
+          changed |= SetCore(ssa_rep->uses[i], defined_core);
+          changed |= SetRef(ssa_rep->uses[i], defined_ref);
+          changed |= SetWide(ssa_rep->uses[i], is_wide);
+          changed |= SetHigh(ssa_rep->uses[i], is_high);
+        }
+        if (attrs & DF_A_WIDE) {
+          DCHECK_EQ(ssa_rep->num_uses, 2);
+          changed |= SetWide(ssa_rep->uses[1], true);
+          changed |= SetHigh(ssa_rep->uses[1], true);
+        }
+      }
+    }
+  }
+  return changed;
+}
+
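+// Human-readable names for RegLocation::location, indexed by the location value;
+// used by DumpRegLocTable() below.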
+static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
+
+void MIRGraph::DumpRegLocTable(RegLocation* table, int count)
+{
+  // FIXME: Quick-specific. Move to Quick (and make a generic version for MIRGraph?)
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
+  if (cg != NULL) {
+    for (int i = 0; i < count; i++) {
+      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
+          table[i].orig_sreg, storage_name[table[i].location],
+          table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
+          table[i].fp ? 'F' : table[i].ref ? 'R' : 'C',
+          table[i].is_const ? 'c' : 'n',
+          table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+          cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
+          table[i].low_reg & cg->FpRegMask(),
+          cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
+          table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
+    }
+  } else {
+    // Either pre-regalloc or Portable.
+    for (int i = 0; i < count; i++) {
+      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c S%d",
+          table[i].orig_sreg, storage_name[table[i].location],
+          table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
+          table[i].fp ? 'F' : table[i].ref ? 'R' : 'C',
+          table[i].is_const ? 'c' : 'n',
+          table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+          table[i].s_reg_low);
+    }
+  }
+}
+
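+// Template used to initialize each entry of the location map in BuildRegLocations():
+// all flags cleared, registers and s_regs marked invalid.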
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+                                     INVALID_REG, INVALID_REG, INVALID_SREG,
+                                     INVALID_SREG};
+
+/*
+ * Simple register allocation.  Some Dalvik virtual registers may
+ * be promoted to physical registers.  Most of the work for temp
+ * allocation is done on the fly.  We also do some initialization and
+ * type inference here.
+ */
+void MIRGraph::BuildRegLocations()
+{
+  int i;
+  RegLocation* loc;
+
+  /* Allocate the location map */
+  loc = static_cast<RegLocation*>(arena_->NewMem(GetNumSSARegs() * sizeof(*loc), true,
+                                                 ArenaAllocator::kAllocRegAlloc));
+  for (i = 0; i < GetNumSSARegs(); i++) {
+    loc[i] = fresh_loc;
+    loc[i].s_reg_low = i;
+    loc[i].is_const = is_constant_v_->IsBitSet(i);
+  }
+
+  /* Patch up the locations for Method* and the compiler temps */
+  loc[method_sreg_].location = kLocCompilerTemp;
+  loc[method_sreg_].defined = true;
+  for (i = 0; i < cu_->num_compiler_temps; i++) {
+    CompilerTemp* ct = compiler_temps_.Get(i);
+    loc[ct->s_reg].location = kLocCompilerTemp;
+    loc[ct->s_reg].defined = true;
+  }
+
+  reg_location_ = loc;
+
+  int num_regs = cu_->num_dalvik_registers;
+
+  /* Add types of incoming arguments based on signature */
+  int num_ins = cu_->num_ins;
+  if (num_ins > 0) {
+    int s_reg = num_regs - num_ins;
+    if ((cu_->access_flags & kAccStatic) == 0) {
+      // For non-static, skip past "this"
+      reg_location_[s_reg].defined = true;
+      reg_location_[s_reg].ref = true;
+      s_reg++;
+    }
+    const char* shorty = cu_->shorty;
+    int shorty_len = strlen(shorty);
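+    // shorty[0] describes the return type; argument descriptors start at index 1.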
+    for (int i = 1; i < shorty_len; i++) {
+      switch (shorty[i]) {
+        case 'D':
+          reg_location_[s_reg].wide = true;
+          reg_location_[s_reg+1].high_word = true;
+          reg_location_[s_reg+1].fp = true;
+          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
+          reg_location_[s_reg].fp = true;
+          reg_location_[s_reg].defined = true;
+          s_reg++;
+          break;
+        case 'J':
+          reg_location_[s_reg].wide = true;
+          reg_location_[s_reg+1].high_word = true;
+          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
+          reg_location_[s_reg].core = true;
+          reg_location_[s_reg].defined = true;
+          s_reg++;
+          break;
+        case 'F':
+          reg_location_[s_reg].fp = true;
+          reg_location_[s_reg].defined = true;
+          break;
+        case 'L':
+          reg_location_[s_reg].ref = true;
+          reg_location_[s_reg].defined = true;
+          break;
+        default:
+          reg_location_[s_reg].core = true;
+          reg_location_[s_reg].defined = true;
+          break;
+      }
+      s_reg++;
+    }
+  }
+
+  /* Do type & size inference pass */
+  PreOrderDfsIterator iter(this, true /* iterative */);
+  bool change = false;
+  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+    change = InferTypeAndSize(bb);
+  }
+
+  /*
+   * Set the s_reg_low field to refer to the pre-SSA name of the
+   * base Dalvik virtual register.  Once we add a better register
+   * allocator, remove this remapping.
+   */
+  for (i = 0; i < GetNumSSARegs(); i++) {
+    if (reg_location_[i].location != kLocCompilerTemp) {
+      int orig_sreg = reg_location_[i].s_reg_low;
+      reg_location_[i].orig_sreg = orig_sreg;
+      reg_location_[i].s_reg_low = SRegToVReg(orig_sreg);
+    }
+  }
+}
+
+}  // namespace art
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
new file mode 100644
index 0000000..8388cfb
--- /dev/null
+++ b/compiler/driver/compiler_driver.cc
@@ -0,0 +1,2404 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_driver.h"
+
+#include <vector>
+
+#include <unistd.h>
+
+#include "base/stl_util.h"
+#include "base/timing_logger.h"
+#include "class_linker.h"
+#include "dex_compilation_unit.h"
+#include "dex_file-inl.h"
+#include "jni_internal.h"
+#include "oat_file.h"
+#include "object_utils.h"
+#include "runtime.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/space/space.h"
+#include "mirror/class_loader.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/throwable.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "stubs/stubs.h"
+#include "thread.h"
+#include "thread_pool.h"
+#include "verifier/method_verifier.h"
+
+#if defined(ART_USE_PORTABLE_COMPILER)
+#include "elf_writer_mclinker.h"
+#else
+#include "elf_writer_quick.h"
+#endif
+
+namespace art {
+
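+// Helpers for reporting AOT compilation statistics: Percentage() expresses x as a
+// share of (x + y); DumpStat() logs that percentage when there is any data.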
+static double Percentage(size_t x, size_t y) {
+  return 100.0 * (static_cast<double>(x)) / (static_cast<double>(x + y));
+}
+
+static void DumpStat(size_t x, size_t y, const char* str) {
+  if (x == 0 && y == 0) {
+    return;
+  }
+  LOG(INFO) << Percentage(x, y) << "% of " << str << " for " << (x + y) << " cases";
+}
+
+class AOTCompilationStats {
+ public:
+  AOTCompilationStats()
+      : stats_lock_("AOT compilation statistics lock"),
+        types_in_dex_cache_(0), types_not_in_dex_cache_(0),
+        strings_in_dex_cache_(0), strings_not_in_dex_cache_(0),
+        resolved_types_(0), unresolved_types_(0),
+        resolved_instance_fields_(0), unresolved_instance_fields_(0),
+        resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
+        type_based_devirtualization_(0),
+        safe_casts_(0), not_safe_casts_(0) {
+    for (size_t i = 0; i <= kMaxInvokeType; i++) {
+      resolved_methods_[i] = 0;
+      unresolved_methods_[i] = 0;
+      virtual_made_direct_[i] = 0;
+      direct_calls_to_boot_[i] = 0;
+      direct_methods_to_boot_[i] = 0;
+    }
+  }
+
+  void Dump() {
+    DumpStat(types_in_dex_cache_, types_not_in_dex_cache_, "types known to be in dex cache");
+    DumpStat(strings_in_dex_cache_, strings_not_in_dex_cache_, "strings known to be in dex cache");
+    DumpStat(resolved_types_, unresolved_types_, "types resolved");
+    DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
+    DumpStat(resolved_local_static_fields_ + resolved_static_fields_, unresolved_static_fields_,
+             "static fields resolved");
+    DumpStat(resolved_local_static_fields_, resolved_static_fields_ + unresolved_static_fields_,
+             "static fields local to a class");
+    DumpStat(safe_casts_, not_safe_casts_, "check-casts removed based on type information");
+    // Note, the code below subtracts the stat value so that when added to the stat value we have
+    // 100% of samples. TODO: clean this up.
+    DumpStat(type_based_devirtualization_,
+             resolved_methods_[kVirtual] + unresolved_methods_[kVirtual] +
+             resolved_methods_[kInterface] + unresolved_methods_[kInterface] -
+             type_based_devirtualization_,
+             "virtual/interface calls made direct based on type information");
+
+    for (size_t i = 0; i <= kMaxInvokeType; i++) {
+      std::ostringstream oss;
+      oss << static_cast<InvokeType>(i) << " methods were AOT resolved";
+      DumpStat(resolved_methods_[i], unresolved_methods_[i], oss.str().c_str());
+      if (virtual_made_direct_[i] > 0) {
+        std::ostringstream oss2;
+        oss2 << static_cast<InvokeType>(i) << " methods made direct";
+        DumpStat(virtual_made_direct_[i],
+                 resolved_methods_[i] + unresolved_methods_[i] - virtual_made_direct_[i],
+                 oss2.str().c_str());
+      }
+      if (direct_calls_to_boot_[i] > 0) {
+        std::ostringstream oss2;
+        oss2 << static_cast<InvokeType>(i) << " method calls are direct into boot";
+        DumpStat(direct_calls_to_boot_[i],
+                 resolved_methods_[i] + unresolved_methods_[i] - direct_calls_to_boot_[i],
+                 oss2.str().c_str());
+      }
+      if (direct_methods_to_boot_[i] > 0) {
+        std::ostringstream oss2;
+        oss2 << static_cast<InvokeType>(i) << " method calls have methods in boot";
+        DumpStat(direct_methods_to_boot_[i],
+                 resolved_methods_[i] + unresolved_methods_[i] - direct_methods_to_boot_[i],
+                 oss2.str().c_str());
+      }
+    }
+  }
+
+// Allow lossy statistics in non-debug builds.
+#ifndef NDEBUG
+#define STATS_LOCK() MutexLock mu(Thread::Current(), stats_lock_)
+#else
+#define STATS_LOCK()
+#endif
+
+  void TypeInDexCache() {
+    STATS_LOCK();
+    types_in_dex_cache_++;
+  }
+
+  void TypeNotInDexCache() {
+    STATS_LOCK();
+    types_not_in_dex_cache_++;
+  }
+
+  void StringInDexCache() {
+    STATS_LOCK();
+    strings_in_dex_cache_++;
+  }
+
+  void StringNotInDexCache() {
+    STATS_LOCK();
+    strings_not_in_dex_cache_++;
+  }
+
+  void TypeDoesntNeedAccessCheck() {
+    STATS_LOCK();
+    resolved_types_++;
+  }
+
+  void TypeNeedsAccessCheck() {
+    STATS_LOCK();
+    unresolved_types_++;
+  }
+
+  void ResolvedInstanceField() {
+    STATS_LOCK();
+    resolved_instance_fields_++;
+  }
+
+  void UnresolvedInstanceField() {
+    STATS_LOCK();
+    unresolved_instance_fields_++;
+  }
+
+  void ResolvedLocalStaticField() {
+    STATS_LOCK();
+    resolved_local_static_fields_++;
+  }
+
+  void ResolvedStaticField() {
+    STATS_LOCK();
+    resolved_static_fields_++;
+  }
+
+  void UnresolvedStaticField() {
+    STATS_LOCK();
+    unresolved_static_fields_++;
+  }
+
+  // Indicate that type information from the verifier led to devirtualization.
+  void PreciseTypeDevirtualization() {
+    STATS_LOCK();
+    type_based_devirtualization_++;
+  }
+
+  // Indicate that a method of the given type was resolved at compile time.
+  void ResolvedMethod(InvokeType type) {
+    DCHECK_LE(type, kMaxInvokeType);
+    STATS_LOCK();
+    resolved_methods_[type]++;
+  }
+
+  // Indicate that a method of the given type was unresolved at compile time as it was in an
+  // unknown dex file.
+  void UnresolvedMethod(InvokeType type) {
+    DCHECK_LE(type, kMaxInvokeType);
+    STATS_LOCK();
+    unresolved_methods_[type]++;
+  }
+
+  // Indicate that a type of virtual method dispatch has been converted into a direct method
+  // dispatch.
+  void VirtualMadeDirect(InvokeType type) {
+    DCHECK(type == kVirtual || type == kInterface || type == kSuper);
+    STATS_LOCK();
+    virtual_made_direct_[type]++;
+  }
+
+  // Indicate that a method of the given type was able to call directly into boot.
+  void DirectCallsToBoot(InvokeType type) {
+    DCHECK_LE(type, kMaxInvokeType);
+    STATS_LOCK();
+    direct_calls_to_boot_[type]++;
+  }
+
+  // Indicate that a method of the given type was able to be resolved directly from boot.
+  void DirectMethodsToBoot(InvokeType type) {
+    DCHECK_LE(type, kMaxInvokeType);
+    STATS_LOCK();
+    direct_methods_to_boot_[type]++;
+  }
+
+  // A check-cast could be eliminated due to verifier type analysis.
+  void SafeCast() {
+    STATS_LOCK();
+    safe_casts_++;
+  }
+
+  // A check-cast couldn't be eliminated due to verifier type analysis.
+  void NotASafeCast() {
+    STATS_LOCK();
+    not_safe_casts_++;
+  }
+
+ private:
+  Mutex stats_lock_;
+
+  size_t types_in_dex_cache_;
+  size_t types_not_in_dex_cache_;
+
+  size_t strings_in_dex_cache_;
+  size_t strings_not_in_dex_cache_;
+
+  size_t resolved_types_;
+  size_t unresolved_types_;
+
+  size_t resolved_instance_fields_;
+  size_t unresolved_instance_fields_;
+
+  size_t resolved_local_static_fields_;
+  size_t resolved_static_fields_;
+  size_t unresolved_static_fields_;
+  // Type based devirtualization for invoke interface and virtual.
+  size_t type_based_devirtualization_;
+
+  size_t resolved_methods_[kMaxInvokeType + 1];
+  size_t unresolved_methods_[kMaxInvokeType + 1];
+  size_t virtual_made_direct_[kMaxInvokeType + 1];
+  size_t direct_calls_to_boot_[kMaxInvokeType + 1];
+  size_t direct_methods_to_boot_[kMaxInvokeType + 1];
+
+  size_t safe_casts_;
+  size_t not_safe_casts_;
+
+  DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
+};
+
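+// Entry points into the compiler backends (Portable/LLVM, Quick, DEX-to-DEX and,
+// when enabled, SeaIR). The constructor below selects among them based on the
+// configured backend.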
+extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver);
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& compiler);
+
+extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver);
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& compiler);
+
+extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
+                                                 const art::DexFile::CodeItem* code_item,
+                                                 uint32_t access_flags,
+                                                 art::InvokeType invoke_type,
+                                                 uint32_t class_def_idx,
+                                                 uint32_t method_idx,
+                                                 jobject class_loader,
+                                                 const art::DexFile& dex_file);
+extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& compiler,
+                                                      const art::DexFile::CodeItem* code_item,
+                                                      uint32_t access_flags,
+                                                      art::InvokeType invoke_type,
+                                                      uint32_t class_def_idx,
+                                                      uint32_t method_idx,
+                                                      jobject class_loader,
+                                                      const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtCompileDEX(art::CompilerDriver& compiler,
+                                              const art::DexFile::CodeItem* code_item,
+                                              uint32_t access_flags,
+                                              art::InvokeType invoke_type,
+                                              uint32_t class_def_idx,
+                                              uint32_t method_idx,
+                                              jobject class_loader,
+                                              const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler,
+                                                   const art::DexFile::CodeItem* code_item,
+                                                   uint32_t access_flags,
+                                                   art::InvokeType invoke_type,
+                                                   uint32_t class_def_idx,
+                                                   uint32_t method_idx,
+                                                   jobject class_loader,
+                                                   const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver,
+                                                        uint32_t access_flags, uint32_t method_idx,
+                                                        const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler,
+                                                         uint32_t access_flags, uint32_t method_idx,
+                                                         const art::DexFile& dex_file);
+
+extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
+                                               std::string const& filename);
+
+CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set,
+                               bool image, DescriptorSet* image_classes,
+                               size_t thread_count, bool support_debugging,
+                               bool dump_stats, bool dump_timings)
+    : compiler_backend_(compiler_backend),
+      instruction_set_(instruction_set),
+      freezing_constructor_lock_("freezing constructor lock"),
+      compiled_classes_lock_("compiled classes lock"),
+      compiled_methods_lock_("compiled method lock"),
+      image_(image),
+      image_classes_(image_classes),
+      thread_count_(thread_count),
+      support_debugging_(support_debugging),
+      start_ns_(0),
+      stats_(new AOTCompilationStats),
+      dump_stats_(dump_stats),
+      dump_timings_(dump_timings),
+      compiler_library_(NULL),
+      compiler_(NULL),
+      compiler_context_(NULL),
+      jni_compiler_(NULL),
+      compiler_enable_auto_elf_loading_(NULL),
+      compiler_get_method_code_addr_(NULL),
+      support_boot_image_fixup_(true)
+{
+  CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key");
+
+  // TODO: more work needed to combine initializations and allow per-method backend selection
+  typedef void (*InitCompilerContextFn)(CompilerDriver&);
+  InitCompilerContextFn init_compiler_context;
+  if (compiler_backend_ == kPortable) {
+    // Initialize compiler_context_
+    init_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtInitCompilerContext);
+    compiler_ = reinterpret_cast<CompilerFn>(ArtCompileMethod);
+  } else {
+    init_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtInitQuickCompilerContext);
+    compiler_ = reinterpret_cast<CompilerFn>(ArtQuickCompileMethod);
+  }
+
+  dex_to_dex_compiler_ = reinterpret_cast<CompilerFn>(ArtCompileDEX);
+
+#ifdef ART_SEA_IR_MODE
+  sea_ir_compiler_ = NULL;
+  if (Runtime::Current()->IsSeaIRMode()) {
+    sea_ir_compiler_ = reinterpret_cast<CompilerFn>(SeaIrCompileMethod);
+  }
+#endif
+
+  init_compiler_context(*this);
+
+  if (compiler_backend_ == kPortable) {
+    jni_compiler_ = reinterpret_cast<JniCompilerFn>(ArtLLVMJniCompileMethod);
+  } else {
+    jni_compiler_ = reinterpret_cast<JniCompilerFn>(ArtQuickJniCompileMethod);
+  }
+
+  CHECK(!Runtime::Current()->IsStarted());
+  if (!image_) {
+    CHECK(image_classes_.get() == NULL);
+  }
+}
+
+CompilerDriver::~CompilerDriver() {
+  Thread* self = Thread::Current();
+  {
+    MutexLock mu(self, compiled_classes_lock_);
+    STLDeleteValues(&compiled_classes_);
+  }
+  {
+    MutexLock mu(self, compiled_methods_lock_);
+    STLDeleteValues(&compiled_methods_);
+  }
+  {
+    MutexLock mu(self, compiled_methods_lock_);
+    STLDeleteElements(&code_to_patch_);
+  }
+  {
+    MutexLock mu(self, compiled_methods_lock_);
+    STLDeleteElements(&methods_to_patch_);
+  }
+  CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key");
+  typedef void (*UninitCompilerContextFn)(CompilerDriver&);
+  UninitCompilerContextFn uninit_compiler_context;
+  // Uninitialize compiler_context_
+  // TODO: rework to combine initialization/uninitialization
+  if (compiler_backend_ == kPortable) {
+    uninit_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtUnInitCompilerContext);
+  } else {
+    uninit_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtUnInitQuickCompilerContext);
+  }
+  uninit_compiler_context(*this);
+}
+
+CompilerTls* CompilerDriver::GetTls() {
+  // Lazily create thread-local storage
+  CompilerTls* res = static_cast<CompilerTls*>(pthread_getspecific(tls_key_));
+  if (res == NULL) {
+    res = new CompilerTls();
+    CHECK_PTHREAD_CALL(pthread_setspecific, (tls_key_, res), "compiler tls");
+  }
+  return res;
+}
+
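+// Trampoline and entry-stub creation simply dispatches on the target instruction
+// set; the actual code sequences come from the per-architecture stub generators.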
+const std::vector<uint8_t>* CompilerDriver::CreatePortableResolutionTrampoline() const {
+  switch (instruction_set_) {
+    case kArm:
+    case kThumb2:
+      return arm::CreatePortableResolutionTrampoline();
+    case kMips:
+      return mips::CreatePortableResolutionTrampoline();
+    case kX86:
+      return x86::CreatePortableResolutionTrampoline();
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+      return NULL;
+  }
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateQuickResolutionTrampoline() const {
+  switch (instruction_set_) {
+    case kArm:
+    case kThumb2:
+      return arm::CreateQuickResolutionTrampoline();
+    case kMips:
+      return mips::CreateQuickResolutionTrampoline();
+    case kX86:
+      return x86::CreateQuickResolutionTrampoline();
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+      return NULL;
+  }
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterEntry() const {
+  switch (instruction_set_) {
+    case kArm:
+    case kThumb2:
+      return arm::CreateInterpreterToInterpreterEntry();
+    case kMips:
+      return mips::CreateInterpreterToInterpreterEntry();
+    case kX86:
+      return x86::CreateInterpreterToInterpreterEntry();
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+      return NULL;
+  }
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToQuickEntry() const {
+  switch (instruction_set_) {
+    case kArm:
+    case kThumb2:
+      return arm::CreateInterpreterToQuickEntry();
+    case kMips:
+      return mips::CreateInterpreterToQuickEntry();
+    case kX86:
+      return x86::CreateInterpreterToQuickEntry();
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+      return NULL;
+  }
+}
+
+void CompilerDriver::CompileAll(jobject class_loader,
+                                const std::vector<const DexFile*>& dex_files) {
+  DCHECK(!Runtime::Current()->IsStarted());
+
+  UniquePtr<ThreadPool> thread_pool(new ThreadPool(thread_count_));
+  TimingLogger timings("compiler", false);
+
+  PreCompile(class_loader, dex_files, *thread_pool.get(), timings);
+
+  Compile(class_loader, dex_files, *thread_pool.get(), timings);
+
+  if (dump_timings_ && timings.GetTotalNs() > MsToNs(1000)) {
+    LOG(INFO) << Dumpable<TimingLogger>(timings);
+  }
+
+  if (dump_stats_) {
+    stats_->Dump();
+  }
+}
+
+static bool IsDexToDexCompilationAllowed(mirror::ClassLoader* class_loader,
+                                         const DexFile& dex_file,
+                                         const DexFile::ClassDef& class_def)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Do not allow DEX-to-DEX compilation of image classes. This is to prevent the
+  // verifier from passing on "quick" instructions at compilation time. It must
+  // only pass on quick instructions at runtime.
+  if (class_loader == NULL) {
+    return false;
+  }
+  const char* descriptor = dex_file.GetClassDescriptor(class_def);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  mirror::Class* klass = class_linker->FindClass(descriptor, class_loader);
+  if (klass == NULL) {
+    Thread* self = Thread::Current();
+    CHECK(self->IsExceptionPending());
+    self->ClearException();
+    return false;
+  }
+  // DEX-to-DEX compilation is only allowed on preverified classes.
+  return klass->IsVerified();
+}
+
+void CompilerDriver::CompileOne(const mirror::AbstractMethod* method) {
+  DCHECK(!Runtime::Current()->IsStarted());
+  Thread* self = Thread::Current();
+  jobject jclass_loader;
+  const DexFile* dex_file;
+  uint32_t class_def_idx;
+  {
+    ScopedObjectAccessUnchecked soa(self);
+    ScopedLocalRef<jobject>
+      local_class_loader(soa.Env(),
+                    soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+    jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
+    // Find the dex_file
+    MethodHelper mh(method);
+    dex_file = &mh.GetDexFile();
+    class_def_idx = mh.GetClassDefIndex();
+  }
+  self->TransitionFromRunnableToSuspended(kNative);
+
+  std::vector<const DexFile*> dex_files;
+  dex_files.push_back(dex_file);
+
+  UniquePtr<ThreadPool> thread_pool(new ThreadPool(1U));
+  TimingLogger timings("CompileOne", false);
+  PreCompile(jclass_loader, dex_files, *thread_pool.get(), timings);
+
+  uint32_t method_idx = method->GetDexMethodIndex();
+  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+  // Can we run the DEX-to-DEX compiler on this class?
+  bool allow_dex_compilation;
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+    mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+    allow_dex_compilation = IsDexToDexCompilationAllowed(class_loader, *dex_file, class_def);
+  }
+  CompileMethod(code_item, method->GetAccessFlags(), method->GetInvokeType(),
+                class_def_idx, method_idx, jclass_loader, *dex_file, allow_dex_compilation);
+
+  self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
+
+  self->TransitionFromSuspendedToRunnable();
+}
+
+void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                             ThreadPool& thread_pool, TimingLogger& timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != NULL);
+    ResolveDexFile(class_loader, *dex_file, thread_pool, timings);
+  }
+}
+
+void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                                ThreadPool& thread_pool, TimingLogger& timings) {
+  LoadImageClasses(timings);
+
+  Resolve(class_loader, dex_files, thread_pool, timings);
+
+  Verify(class_loader, dex_files, thread_pool, timings);
+
+  InitializeClasses(class_loader, dex_files, thread_pool, timings);
+
+  UpdateImageClasses(timings);
+}
+
+bool CompilerDriver::IsImageClass(const char* descriptor) const {
+  DCHECK(descriptor != NULL);
+  if (image_classes_.get() == NULL) {
+    return true;
+  }
+  return image_classes_->find(descriptor) != image_classes_->end();
+}
+
+static void ResolveExceptionsForMethod(MethodHelper* mh,
+    std::set<std::pair<uint16_t, const DexFile*> >& exceptions_to_resolve)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const DexFile::CodeItem* code_item = mh->GetCodeItem();
+  if (code_item == NULL) {
+    return;  // native or abstract method
+  }
+  if (code_item->tries_size_ == 0) {
+    return;  // nothing to process
+  }
+  const byte* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
+  size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
+  for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
+    int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
+    bool has_catch_all = false;
+    if (encoded_catch_handler_size <= 0) {
+      encoded_catch_handler_size = -encoded_catch_handler_size;
+      has_catch_all = true;
+    }
+    for (int32_t j = 0; j < encoded_catch_handler_size; j++) {
+      uint16_t encoded_catch_handler_handlers_type_idx =
+          DecodeUnsignedLeb128(&encoded_catch_handler_list);
+      // Add to set of types to resolve if not already in the dex cache resolved types
+      if (!mh->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+        exceptions_to_resolve.insert(
+            std::pair<uint16_t, const DexFile*>(encoded_catch_handler_handlers_type_idx,
+                                                &mh->GetDexFile()));
+      }
+      // ignore address associated with catch handler
+      DecodeUnsignedLeb128(&encoded_catch_handler_list);
+    }
+    if (has_catch_all) {
+      // ignore catch all address
+      DecodeUnsignedLeb128(&encoded_catch_handler_list);
+    }
+  }
+}
+
+static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  std::set<std::pair<uint16_t, const DexFile*> >* exceptions_to_resolve =
+      reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*> >*>(arg);
+  MethodHelper mh;
+  for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
+    mirror::AbstractMethod* m = c->GetVirtualMethod(i);
+    mh.ChangeMethod(m);
+    ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
+  }
+  for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
+    mirror::AbstractMethod* m = c->GetDirectMethod(i);
+    mh.ChangeMethod(m);
+    ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
+  }
+  return true;
+}
+
+static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  CompilerDriver::DescriptorSet* image_classes =
+      reinterpret_cast<CompilerDriver::DescriptorSet*>(arg);
+  image_classes->insert(ClassHelper(klass).GetDescriptor());
+  return true;
+}
+
+// Make a list of descriptors for classes to include in the image
+void CompilerDriver::LoadImageClasses(TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  if (image_classes_.get() == NULL) {
+    return;
+  }
+
+  // Make a first pass to load all classes explicitly listed in the file
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  typedef DescriptorSet::iterator It;  // TODO: C++0x auto
+  for (It it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
+    std::string descriptor(*it);
+    SirtRef<mirror::Class> klass(self, class_linker->FindSystemClass(descriptor.c_str()));
+    if (klass.get() == NULL) {
+      image_classes_->erase(it++);
+      LOG(WARNING) << "Failed to find class " << descriptor;
+      Thread::Current()->ClearException();
+    } else {
+      ++it;
+    }
+  }
+
+  // Resolve exception classes referenced by the loaded classes. The catch logic assumes
+  // exceptions are resolved by the verifier when there is a catch block in an interested method.
+  // Do this here so that exception classes appear to have been specified as image classes.
+  std::set<std::pair<uint16_t, const DexFile*> > unresolved_exception_types;
+  SirtRef<mirror::Class> java_lang_Throwable(self,
+                                     class_linker->FindSystemClass("Ljava/lang/Throwable;"));
+  do {
+    unresolved_exception_types.clear();
+    class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
+                               &unresolved_exception_types);
+    typedef std::set<std::pair<uint16_t, const DexFile*> >::const_iterator It;  // TODO: C++0x auto
+    for (It it = unresolved_exception_types.begin(),
+         end = unresolved_exception_types.end();
+         it != end; ++it) {
+      uint16_t exception_type_idx = it->first;
+      const DexFile* dex_file = it->second;
+      mirror::DexCache* dex_cache = class_linker->FindDexCache(*dex_file);
+      mirror::ClassLoader* class_loader = NULL;
+      SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
+                                                                   dex_cache, class_loader));
+      if (klass.get() == NULL) {
+        const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
+        const char* descriptor = dex_file->GetTypeDescriptor(type_id);
+        LOG(FATAL) << "Failed to resolve class " << descriptor;
+      }
+      DCHECK(java_lang_Throwable->IsAssignableFrom(klass.get()));
+    }
+    // Resolving exceptions may load classes that reference more exceptions; iterate until no
+    // more are found.
+  } while (!unresolved_exception_types.empty());
+
+  // We walk the roots looking for classes so that we'll pick up the
+  // above classes plus any classes they depend on, such as super
+  // classes, interfaces, and the required ClassLinker roots.
+  class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get());
+
+  CHECK_NE(image_classes_->size(), 0U);
+  timings.AddSplit("LoadImageClasses");
+}
+
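+// Recursively record klass and everything it references through its super classes,
+// directly implemented interfaces and array component types in the image class set,
+// stopping at classes that are already recorded.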
+static void MaybeAddToImageClasses(mirror::Class* klass, CompilerDriver::DescriptorSet* image_classes)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  while (!klass->IsObjectClass()) {
+    ClassHelper kh(klass);
+    const char* descriptor = kh.GetDescriptor();
+    std::pair<CompilerDriver::DescriptorSet::iterator, bool> result =
+        image_classes->insert(descriptor);
+    if (result.second) {
+      LOG(INFO) << "Adding " << descriptor << " to image classes";
+    } else {
+      return;
+    }
+    for (size_t i = 0; i < kh.NumDirectInterfaces(); ++i) {
+      MaybeAddToImageClasses(kh.GetDirectInterface(i), image_classes);
+    }
+    if (klass->IsArrayClass()) {
+      MaybeAddToImageClasses(klass->GetComponentType(), image_classes);
+    }
+    klass = klass->GetSuperClass();
+  }
+}
+
+void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void* arg) {
+  DCHECK(object != NULL);
+  DCHECK(arg != NULL);
+  CompilerDriver* compiler_driver = reinterpret_cast<CompilerDriver*>(arg);
+  MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get());
+}
+
+void CompilerDriver::UpdateImageClasses(TimingLogger& timings) {
+  if (image_classes_.get() == NULL) {
+    return;
+  }
+
+  // Update image_classes_ with classes for objects created by <clinit> methods.
+  Thread* self = Thread::Current();
+  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  // TODO: Image spaces only?
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  heap->FlushAllocStack();
+  heap->GetLiveBitmap()->Walk(FindClinitImageClassesCallback, this);
+  self->EndAssertNoThreadSuspension(old_cause);
+  timings.AddSplit("UpdateImageClasses");
+}
+
+void CompilerDriver::RecordClassStatus(ClassReference ref, CompiledClass* compiled_class) {
+  MutexLock mu(Thread::Current(), CompilerDriver::compiled_classes_lock_);
+  compiled_classes_.Put(ref, compiled_class);
+}
+
+bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file,
+                                                      uint32_t type_idx) {
+  if (IsImage() && IsImageClass(dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)))) {
+    if (kIsDebugBuild) {
+      ScopedObjectAccess soa(Thread::Current());
+      mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+      mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+      CHECK(resolved_class != NULL);
+    }
+    stats_->TypeInDexCache();
+    return true;
+  } else {
+    stats_->TypeNotInDexCache();
+    return false;
+  }
+}
+
+bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
+                                                        uint32_t string_idx) {
+  // See also Compiler::ResolveDexFile
+
+  bool result = false;
+  if (IsImage()) {
+    // We resolve all const-string strings when building for the image.
+    ScopedObjectAccess soa(Thread::Current());
+    mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+    Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
+    result = true;
+  }
+  if (result) {
+    stats_->StringInDexCache();
+  } else {
+    stats_->StringNotInDexCache();
+  }
+  return result;
+}
+
+bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
+                                                uint32_t type_idx,
+                                                bool* type_known_final, bool* type_known_abstract,
+                                                bool* equals_referrers_class) {
+  if (type_known_final != NULL) {
+    *type_known_final = false;
+  }
+  if (type_known_abstract != NULL) {
+    *type_known_abstract = false;
+  }
+  if (equals_referrers_class != NULL) {
+    *equals_referrers_class = false;
+  }
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+  // Get type from dex cache assuming it was populated by the verifier
+  mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+  if (resolved_class == NULL) {
+    stats_->TypeNeedsAccessCheck();
+    return false;  // Unknown class needs access checks.
+  }
+  const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
+  if (equals_referrers_class != NULL) {
+    *equals_referrers_class = (method_id.class_idx_ == type_idx);
+  }
+  mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
+  if (referrer_class == NULL) {
+    stats_->TypeNeedsAccessCheck();
+    return false;  // Incomplete referrer knowledge needs access check.
+  }
+  // Perform access check, will return true if access is ok or false if we're going to have to
+  // check this at runtime (for example for class loaders).
+  bool result = referrer_class->CanAccess(resolved_class);
+  if (result) {
+    stats_->TypeDoesntNeedAccessCheck();
+    if (type_known_final != NULL) {
+      *type_known_final = resolved_class->IsFinal() && !resolved_class->IsArrayClass();
+    }
+    if (type_known_abstract != NULL) {
+      *type_known_abstract = resolved_class->IsAbstract() && !resolved_class->IsArrayClass();
+    }
+  } else {
+    stats_->TypeNeedsAccessCheck();
+  }
+  return result;
+}
+
+bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
+                                                            const DexFile& dex_file,
+                                                            uint32_t type_idx) {
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+  // Get type from dex cache assuming it was populated by the verifier.
+  mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+  if (resolved_class == NULL) {
+    stats_->TypeNeedsAccessCheck();
+    return false;  // Unknown class needs access checks.
+  }
+  const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
+  mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
+  if (referrer_class == NULL) {
+    stats_->TypeNeedsAccessCheck();
+    return false;  // Incomplete referrer knowledge needs access check.
+  }
+  // Perform access and instantiable checks, will return true if access is ok or false if we're
+  // going to have to check this at runtime (for example for class loaders).
+  bool result = referrer_class->CanAccess(resolved_class) && resolved_class->IsInstantiable();
+  if (result) {
+    stats_->TypeDoesntNeedAccessCheck();
+  } else {
+    stats_->TypeNeedsAccessCheck();
+  }
+  return result;
+}
+
+static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
+                                                   mirror::DexCache* dex_cache,
+                                                   const DexCompilationUnit* mUnit)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // The passed dex_cache is a hint; sanity check it before asking the class linker, which will
+  // take a lock.
+  if (dex_cache->GetDexFile() != mUnit->GetDexFile()) {
+    dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+  }
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+  const DexFile::MethodId& referrer_method_id = mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
+  return mUnit->GetClassLinker()->ResolveType(*mUnit->GetDexFile(), referrer_method_id.class_idx_,
+                                              dex_cache, class_loader);
+}
+
+static mirror::Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa,
+                                                                const DexCompilationUnit* mUnit,
+                                                                uint32_t field_idx)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+  return mUnit->GetClassLinker()->ResolveField(*mUnit->GetDexFile(), field_idx, dex_cache,
+                                               class_loader, false);
+}
+
+static mirror::AbstractMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
+                                                                          const DexCompilationUnit* mUnit,
+                                                                          uint32_t method_idx,
+                                                                          InvokeType type)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+  return mUnit->GetClassLinker()->ResolveMethod(*mUnit->GetDexFile(), method_idx, dex_cache,
+                                                class_loader, NULL, type);
+}
+
+bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
+                                              int& field_offset, bool& is_volatile, bool is_put) {
+  ScopedObjectAccess soa(Thread::Current());
+  // Conservative defaults.
+  field_offset = -1;
+  is_volatile = true;
+  // Try to resolve the field; ignore an Incompatible Class Change Error (i.e. the field is static).
+  mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
+  if (resolved_field != NULL && !resolved_field->IsStatic()) {
+    mirror::Class* referrer_class =
+        ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(),
+                                     mUnit);
+    if (referrer_class != NULL) {
+      mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+      bool access_ok = referrer_class->CanAccess(fields_class) &&
+                       referrer_class->CanAccessMember(fields_class,
+                                                       resolved_field->GetAccessFlags());
+      if (!access_ok) {
+        // The referring class can't access the resolved field; this may occur as a result of a
+        // protected field being made public by a sub-class. Resort to the dex file to determine
+        // the correct class for the access check.
+        const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
+        mirror::Class* dex_fields_class = mUnit->GetClassLinker()->ResolveType(dex_file,
+                                                         dex_file.GetFieldId(field_idx).class_idx_,
+                                                         referrer_class);
+        access_ok = referrer_class->CanAccess(dex_fields_class) &&
+                    referrer_class->CanAccessMember(dex_fields_class,
+                                                    resolved_field->GetAccessFlags());
+      }
+      bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
+          fields_class != referrer_class;
+      if (access_ok && !is_write_to_final_from_wrong_class) {
+        field_offset = resolved_field->GetOffset().Int32Value();
+        is_volatile = resolved_field->IsVolatile();
+        stats_->ResolvedInstanceField();
+        return true;  // Fast path.
+      }
+    }
+  }
+  // Clean up any exception left by field/type resolution
+  if (soa.Self()->IsExceptionPending()) {
+    soa.Self()->ClearException();
+  }
+  stats_->UnresolvedInstanceField();
+  return false;  // Incomplete knowledge needs slow path.
+}
+
+bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
+                                            int& field_offset, int& ssb_index,
+                                            bool& is_referrers_class, bool& is_volatile,
+                                            bool is_put) {
+  ScopedObjectAccess soa(Thread::Current());
+  // Conservative defaults.
+  field_offset = -1;
+  ssb_index = -1;
+  is_referrers_class = false;
+  is_volatile = true;
+  // Try to resolve the field; ignore an Incompatible Class Change Error (i.e. the field isn't static).
+  mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
+  if (resolved_field != NULL && resolved_field->IsStatic()) {
+    mirror::Class* referrer_class =
+        ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(),
+                                     mUnit);
+    if (referrer_class != NULL) {
+      mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+      if (fields_class == referrer_class) {
+        is_referrers_class = true;  // implies no worrying about class initialization
+        field_offset = resolved_field->GetOffset().Int32Value();
+        is_volatile = resolved_field->IsVolatile();
+        stats_->ResolvedLocalStaticField();
+        return true;  // fast path
+      } else {
+        bool access_ok = referrer_class->CanAccess(fields_class) &&
+                         referrer_class->CanAccessMember(fields_class,
+                                                         resolved_field->GetAccessFlags());
+        if (!access_ok) {
+          // The referring class can't access the resolved field; this may occur as a result of a
+          // protected field being made public by a sub-class. Resort to the dex file to determine
+          // the correct class for the access check. Don't change the field's class as that is
+          // used to identify the SSB.
+          const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
+          mirror::Class* dex_fields_class =
+              mUnit->GetClassLinker()->ResolveType(dex_file,
+                                                   dex_file.GetFieldId(field_idx).class_idx_,
+                                                   referrer_class);
+          access_ok = referrer_class->CanAccess(dex_fields_class) &&
+                      referrer_class->CanAccessMember(dex_fields_class,
+                                                      resolved_field->GetAccessFlags());
+        }
+        bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal();
+        if (access_ok && !is_write_to_final_from_wrong_class) {
+          // We have the resolved field; we must make it into an ssb index for the referrer
+          // in its static storage base (which may fail if it doesn't have a slot for it).
+          // TODO: for images we can elide the static storage base null check
+          // if we know there's a non-null entry in the image
+          mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+          if (fields_class->GetDexCache() == dex_cache) {
+            // Common case where the dex caches of the referrer and the field are the same;
+            // no need to search the dex file.
+            ssb_index = fields_class->GetDexTypeIndex();
+            field_offset = resolved_field->GetOffset().Int32Value();
+            is_volatile = resolved_field->IsVolatile();
+            stats_->ResolvedStaticField();
+            return true;
+          }
+          // Search dex file for localized ssb index, may fail if field's class is a parent
+          // of the class mentioned in the dex file and there is no dex cache entry.
+          const DexFile::StringId* string_id =
+              mUnit->GetDexFile()->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
+          if (string_id != NULL) {
+            const DexFile::TypeId* type_id =
+               mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
+            if (type_id != NULL) {
+              // medium path, needs check of static storage base being initialized
+              ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
+              field_offset = resolved_field->GetOffset().Int32Value();
+              is_volatile = resolved_field->IsVolatile();
+              stats_->ResolvedStaticField();
+              return true;
+            }
+          }
+        }
+      }
+    }
+  }
+  // Clean up any exception left by field/type resolution
+  if (soa.Self()->IsExceptionPending()) {
+    soa.Self()->ClearException();
+  }
+  stats_->UnresolvedStaticField();
+  return false;  // Incomplete knowledge needs slow path.
+}
+
+void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+                                                   mirror::Class* referrer_class,
+                                                   mirror::AbstractMethod* method,
+                                                   uintptr_t& direct_code,
+                                                   uintptr_t& direct_method,
+                                                   bool update_stats) {
+  // For direct and static methods compute possible direct_code and direct_method values, i.e.
+  // an address for the Method* being invoked and an address of the code for that Method*.
+  // For interface calls compute a value for direct_method that is the interface method being
+  // invoked, so this can be passed to the out-of-line runtime support code.
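+  // A value of 0 for either output means the address is not known at compile time, in which
+  // case the generated code is expected to fall back to loading it through the dex cache.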
+  direct_code = 0;
+  direct_method = 0;
+  if (compiler_backend_ == kPortable) {
+    if (sharp_type != kStatic && sharp_type != kDirect) {
+      return;
+    }
+  } else {
+    if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) {
+      return;
+    }
+  }
+  bool method_code_in_boot = method->GetDeclaringClass()->GetClassLoader() == NULL;
+  if (!method_code_in_boot) {
+    return;
+  }
+  bool has_clinit_trampoline = method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
+  if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
+    // Ensure we run the clinit trampoline unless we are invoking a static method in the same class.
+    return;
+  }
+  if (update_stats) {
+    if (sharp_type != kInterface) {  // Interfaces always go via a trampoline.
+      stats_->DirectCallsToBoot(type);
+    }
+    stats_->DirectMethodsToBoot(type);
+  }
+  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
+  if (compiling_boot) {
+    if (support_boot_image_fixup_) {
+      MethodHelper mh(method);
+      if (IsImageClass(mh.GetDeclaringClassDescriptor())) {
+        // We can only branch directly to Methods that are resolved in the DexCache.
+        // Otherwise we won't invoke the resolution trampoline.
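+        // Note: -1 acts as a sentinel; the concrete Method* and code addresses are expected to
+        // be patched in later, when the boot image is written, rather than embedded here.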
+        direct_method = -1;
+        direct_code = -1;
+      }
+    }
+  } else {
+    if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) {
+      direct_method = reinterpret_cast<uintptr_t>(method);
+    }
+    direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+  }
+}
+
+bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
+                                       InvokeType& invoke_type,
+                                       MethodReference& target_method,
+                                       int& vtable_idx,
+                                       uintptr_t& direct_code, uintptr_t& direct_method,
+                                       bool update_stats) {
+  ScopedObjectAccess soa(Thread::Current());
+  vtable_idx = -1;
+  direct_code = 0;
+  direct_method = 0;
+  mirror::AbstractMethod* resolved_method =
+      ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method.dex_method_index,
+                                                 invoke_type);
+  if (resolved_method != NULL) {
+    // Don't try to fast-path if we don't understand the caller's class or this appears to be an
+    // Incompatible Class Change Error.
+    mirror::Class* referrer_class =
+        ComputeCompilingMethodsClass(soa, resolved_method->GetDeclaringClass()->GetDexCache(),
+                                     mUnit);
+    bool icce = resolved_method->CheckIncompatibleClassChange(invoke_type);
+    if (referrer_class != NULL && !icce) {
+      mirror::Class* methods_class = resolved_method->GetDeclaringClass();
+      if (!referrer_class->CanAccess(methods_class) ||
+          !referrer_class->CanAccessMember(methods_class,
+                                           resolved_method->GetAccessFlags())) {
+        // The referring class can't access the resolved method; this may occur as a result of a
+        // protected method being made public by implementing an interface that re-declares the
+        // method public. Resort to the dex file to determine the correct class to use for the
+        // access check.
+        uint16_t class_idx =
+            target_method.dex_file->GetMethodId(target_method.dex_method_index).class_idx_;
+        methods_class = mUnit->GetClassLinker()->ResolveType(*target_method.dex_file,
+                                                             class_idx, referrer_class);
+      }
+      if (referrer_class->CanAccess(methods_class) &&
+          referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) {
+        const bool kEnableFinalBasedSharpening = true;
+        // Sharpen a virtual call into a direct call when the target is known not to have been
+        // overridden (i.e. it is final).
+        bool can_sharpen_virtual_based_on_type =
+            (invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
+        // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
+        // the super class.
+        bool can_sharpen_super_based_on_type = (invoke_type == kSuper) &&
+            (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
+            resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
+            (methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
+
+        if (kEnableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
+                                            can_sharpen_super_based_on_type)) {
+          // Sharpen a virtual call into a direct call. The method_idx is an index into the
+          // referrer's dex cache; check that this resolved method is where we expect it.
+          CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method.dex_method_index) ==
+                resolved_method) << PrettyMethod(resolved_method);
+          if (update_stats) {
+            stats_->ResolvedMethod(invoke_type);
+            stats_->VirtualMadeDirect(invoke_type);
+          }
+          GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, resolved_method,
+                                        direct_code, direct_method, update_stats);
+          invoke_type = kDirect;
+          return true;
+        }
+        const bool kEnableVerifierBasedSharpening = true;
+        if (kEnableVerifierBasedSharpening && (invoke_type == kVirtual ||
+                                               invoke_type == kInterface)) {
+          // Did the verifier record a more precise invoke target based on its type information?
+          const MethodReference caller_method(mUnit->GetDexFile(), mUnit->GetDexMethodIndex());
+          const MethodReference* devirt_map_target =
+              verifier::MethodVerifier::GetDevirtMap(caller_method, dex_pc);
+          if (devirt_map_target != NULL) {
+            mirror::DexCache* target_dex_cache =
+                mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file);
+            mirror::ClassLoader* class_loader =
+                soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+            mirror::AbstractMethod* called_method =
+                mUnit->GetClassLinker()->ResolveMethod(*devirt_map_target->dex_file,
+                                                       devirt_map_target->dex_method_index,
+                                                       target_dex_cache, class_loader, NULL,
+                                                       kVirtual);
+            CHECK(called_method != NULL);
+            CHECK(!called_method->IsAbstract());
+            GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, called_method,
+                                          direct_code, direct_method, update_stats);
+            bool compiler_needs_dex_cache =
+                (GetCompilerBackend() == kPortable) ||
+                (GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) ||
+                (direct_code == 0) || (direct_code == static_cast<uintptr_t>(-1)) ||
+                (direct_method == 0) || (direct_method == static_cast<uintptr_t>(-1));
+            if ((devirt_map_target->dex_file != target_method.dex_file) &&
+                compiler_needs_dex_cache) {
+              // We need to use the dex cache to find either the method or code, and the dex file
+              // containing the method isn't the one expected for the target method. Try to find
+              // the method within the expected target dex file.
+              // TODO: the -1 could be handled as direct code if the patching knew the target dex
+              //       file.
+              // TODO: quick only supports direct pointers with Thumb2.
+              // TODO: the following should be factored into a common helper routine to find
+              //       one dex file's method within another.
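+              // The lookup chain below is: declaring class descriptor string -> type id ->
+              // method name string -> proto (signature) -> method id, all within
+              // target_method.dex_file.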
+              const DexFile* dexfile = target_method.dex_file;
+              const DexFile* cm_dexfile =
+                  called_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+              const DexFile::MethodId& cm_method_id =
+                  cm_dexfile->GetMethodId(called_method->GetDexMethodIndex());
+              const char* cm_descriptor = cm_dexfile->StringByTypeIdx(cm_method_id.class_idx_);
+              const DexFile::StringId* descriptor = dexfile->FindStringId(cm_descriptor);
+              if (descriptor != NULL) {
+                const DexFile::TypeId* type_id =
+                    dexfile->FindTypeId(dexfile->GetIndexForStringId(*descriptor));
+                if (type_id != NULL) {
+                  const char* cm_name = cm_dexfile->GetMethodName(cm_method_id);
+                  const DexFile::StringId* name = dexfile->FindStringId(cm_name);
+                  if (name != NULL) {
+                    uint16_t return_type_idx;
+                    std::vector<uint16_t> param_type_idxs;
+                    bool success = dexfile->CreateTypeList(&return_type_idx, &param_type_idxs,
+                                                           cm_dexfile->GetMethodSignature(cm_method_id));
+                    if (success) {
+                      const DexFile::ProtoId* sig =
+                          dexfile->FindProtoId(return_type_idx, param_type_idxs);
+                      if (sig != NULL) {
+                        const DexFile::MethodId* method_id =
+                            dexfile->FindMethodId(*type_id, *name, *sig);
+                        if (method_id != NULL) {
+                          if (update_stats) {
+                            stats_->ResolvedMethod(invoke_type);
+                            stats_->VirtualMadeDirect(invoke_type);
+                            stats_->PreciseTypeDevirtualization();
+                          }
+                          target_method.dex_method_index = dexfile->GetIndexForMethodId(*method_id);
+                          invoke_type = kDirect;
+                          return true;
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+              // TODO: the stats for direct code and method are off as we failed to find the direct
+              //       method in the referring method's dex cache/file.
+            } else {
+              if (update_stats) {
+                stats_->ResolvedMethod(invoke_type);
+                stats_->VirtualMadeDirect(invoke_type);
+                stats_->PreciseTypeDevirtualization();
+              }
+              target_method = *devirt_map_target;
+              invoke_type = kDirect;
+              return true;
+            }
+          }
+        }
+        if (invoke_type == kSuper) {
+          // Unsharpened super calls are suspicious so go slow-path.
+        } else {
+          // Sharpening failed so generate a regular resolved method dispatch.
+          if (update_stats) {
+            stats_->ResolvedMethod(invoke_type);
+          }
+          if (invoke_type == kVirtual || invoke_type == kSuper) {
+            vtable_idx = resolved_method->GetMethodIndex();
+          }
+          GetCodeAndMethodForDirectCall(invoke_type, invoke_type, referrer_class, resolved_method,
+                                        direct_code, direct_method, update_stats);
+          return true;
+        }
+      }
+    }
+  }
+  // Clean up any exception left by method/invoke_type resolution
+  if (soa.Self()->IsExceptionPending()) {
+    soa.Self()->ClearException();
+  }
+  if (update_stats) {
+    stats_->UnresolvedMethod(invoke_type);
+  }
+  return false;  // Incomplete knowledge needs slow path.
+}
+
+bool CompilerDriver::IsSafeCast(const MethodReference& mr, uint32_t dex_pc) {
+  bool result = verifier::MethodVerifier::IsSafeCast(mr, dex_pc);
+  if (result) {
+    stats_->SafeCast();
+  } else {
+    stats_->NotASafeCast();
+  }
+  return result;
+}
+
+
+void CompilerDriver::AddCodePatch(const DexFile* dex_file,
+                                  uint32_t referrer_method_idx,
+                                  InvokeType referrer_invoke_type,
+                                  uint32_t target_method_idx,
+                                  InvokeType target_invoke_type,
+                                  size_t literal_offset) {
+  MutexLock mu(Thread::Current(), compiled_methods_lock_);
+  code_to_patch_.push_back(new PatchInformation(dex_file,
+                                                referrer_method_idx,
+                                                referrer_invoke_type,
+                                                target_method_idx,
+                                                target_invoke_type,
+                                                literal_offset));
+}
+
+void CompilerDriver::AddMethodPatch(const DexFile* dex_file,
+                                    uint32_t referrer_method_idx,
+                                    InvokeType referrer_invoke_type,
+                                    uint32_t target_method_idx,
+                                    InvokeType target_invoke_type,
+                                    size_t literal_offset) {
+  MutexLock mu(Thread::Current(), compiled_methods_lock_);
+  methods_to_patch_.push_back(new PatchInformation(dex_file,
+                                                   referrer_method_idx,
+                                                   referrer_invoke_type,
+                                                   target_method_idx,
+                                                   target_invoke_type,
+                                                   literal_offset));
+}
+
+class ParallelCompilationManager {
+ public:
+  typedef void Callback(const ParallelCompilationManager* manager, size_t index);
+
+  ParallelCompilationManager(ClassLinker* class_linker,
+                             jobject class_loader,
+                             CompilerDriver* compiler,
+                             const DexFile* dex_file,
+                             ThreadPool& thread_pool)
+    : class_linker_(class_linker),
+      class_loader_(class_loader),
+      compiler_(compiler),
+      dex_file_(dex_file),
+      thread_pool_(&thread_pool) {}
+
+  ClassLinker* GetClassLinker() const {
+    CHECK(class_linker_ != NULL);
+    return class_linker_;
+  }
+
+  jobject GetClassLoader() const {
+    return class_loader_;
+  }
+
+  CompilerDriver* GetCompiler() const {
+    CHECK(compiler_ != NULL);
+    return compiler_;
+  }
+
+  const DexFile* GetDexFile() const {
+    CHECK(dex_file_ != NULL);
+    return dex_file_;
+  }
+
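+  // Work is distributed in stripes: closure i handles indices begin + i, begin + i + work_units,
+  // begin + i + 2 * work_units, ... (all less than end). For example, with begin = 0, end = 10
+  // and work_units = 4, closure 2 processes indices 2 and 6.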
+  void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) {
+    Thread* self = Thread::Current();
+    self->AssertNoPendingException();
+    CHECK_GT(work_units, 0U);
+
+    std::vector<ForAllClosure*> closures(work_units);
+    for (size_t i = 0; i < work_units; ++i) {
+      closures[i] = new ForAllClosure(this, begin + i, end, callback, work_units);
+      thread_pool_->AddTask(self, closures[i]);
+    }
+    thread_pool_->StartWorkers(self);
+
+    // Ensure we're suspended while we're blocked waiting for the other threads to finish (the
+    // worker thread destructors called below perform the join).
+    CHECK_NE(self->GetState(), kRunnable);
+
+    // Wait for all the worker threads to finish.
+    thread_pool_->Wait(self, true, false);
+  }
+
+ private:
+
+  class ForAllClosure : public Task {
+   public:
+    ForAllClosure(ParallelCompilationManager* manager, size_t begin, size_t end, Callback* callback,
+                  size_t stripe)
+        : manager_(manager),
+          begin_(begin),
+          end_(end),
+          callback_(callback),
+          stripe_(stripe) {}
+
+    virtual void Run(Thread* self) {
+      for (size_t i = begin_; i < end_; i += stripe_) {
+        callback_(manager_, i);
+        self->AssertNoPendingException();
+      }
+    }
+
+    virtual void Finalize() {
+      delete this;
+    }
+   private:
+    const ParallelCompilationManager* const manager_;
+    const size_t begin_;
+    const size_t end_;
+    const Callback* const callback_;
+    const size_t stripe_;
+  };
+
+  ClassLinker* const class_linker_;
+  const jobject class_loader_;
+  CompilerDriver* const compiler_;
+  const DexFile* const dex_file_;
+  ThreadPool* const thread_pool_;
+};
+
+// Return true if the class should be skipped during compilation. We
+// never skip classes in the boot class loader. However, if we have a
+// non-boot class loader and we can resolve the class in the boot
+// class loader, we do skip the class. This happens if an app bundles
+// classes found in the boot classpath. Since at runtime we will
+// select the class from the boot classpath, do not attempt to resolve
+// or compile it now.
+static bool SkipClass(mirror::ClassLoader* class_loader,
+                      const DexFile& dex_file,
+                      const DexFile::ClassDef& class_def)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (class_loader == NULL) {
+    return false;
+  }
+  const char* descriptor = dex_file.GetClassDescriptor(class_def);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
+  if (klass == NULL) {
+    Thread* self = Thread::Current();
+    CHECK(self->IsExceptionPending());
+    self->ClearException();
+    return false;
+  }
+  return true;
+}
+
+static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager, size_t class_def_index)
+    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
+  const DexFile& dex_file = *manager->GetDexFile();
+
+  // Method and Field are the worst. We can't resolve them without
+  // context either from the code that uses them (to disambiguate
+  // virtual vs direct methods and instance vs static fields) or from
+  // class definitions. While the compiler will resolve what it can as
+  // it needs it, here we try to resolve the fields and methods used in
+  // class definitions, since many of them may never be referenced by
+  // generated code.
+  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+  if (SkipClass(class_loader, dex_file, class_def)) {
+    return;
+  }
+
+  // Note the class_data pointer advances through the headers,
+  // static fields, instance fields, direct methods, and virtual
+  // methods.
+  const byte* class_data = dex_file.GetClassData(class_def);
+  if (class_data == NULL) {
+    // empty class such as a marker interface
+    return;
+  }
+  Thread* self = Thread::Current();
+  ClassLinker* class_linker = manager->GetClassLinker();
+  mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
+  ClassDataItemIterator it(dex_file, class_data);
+  while (it.HasNextStaticField()) {
+    mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache,
+                                                      class_loader, true);
+    if (field == NULL) {
+      CHECK(self->IsExceptionPending());
+      self->ClearException();
+    }
+    it.Next();
+  }
+  // If an instance field is final then we need to have a barrier on the return, static final
+  // fields are assigned within the lock held for class initialization.
+  bool requires_constructor_barrier = false;
+  while (it.HasNextInstanceField()) {
+    if ((it.GetMemberAccessFlags() & kAccFinal) != 0) {
+      requires_constructor_barrier = true;
+    }
+
+    mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache,
+                                                      class_loader, false);
+    if (field == NULL) {
+      CHECK(self->IsExceptionPending());
+      self->ClearException();
+    }
+    it.Next();
+  }
+  if (requires_constructor_barrier) {
+    manager->GetCompiler()->AddRequiresConstructorBarrier(soa.Self(), manager->GetDexFile(),
+                                                          class_def_index);
+  }
+  while (it.HasNextDirectMethod()) {
+    mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
+                                                                 dex_cache, class_loader, NULL,
+                                                                 it.GetMethodInvokeType(class_def));
+    if (method == NULL) {
+      CHECK(self->IsExceptionPending());
+      self->ClearException();
+    }
+    it.Next();
+  }
+  while (it.HasNextVirtualMethod()) {
+    mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
+                                                                 dex_cache, class_loader, NULL,
+                                                                 it.GetMethodInvokeType(class_def));
+    if (method == NULL) {
+      CHECK(self->IsExceptionPending());
+      self->ClearException();
+    }
+    it.Next();
+  }
+  DCHECK(!it.HasNext());
+}
+
+static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx)
+    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  // Class derived values are more complicated, they require the linker and loader.
+  ScopedObjectAccess soa(Thread::Current());
+  ClassLinker* class_linker = manager->GetClassLinker();
+  const DexFile& dex_file = *manager->GetDexFile();
+  mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
+  mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
+
+  if (klass == NULL) {
+    CHECK(soa.Self()->IsExceptionPending());
+    Thread::Current()->ClearException();
+  }
+}
+
+void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
+                                    ThreadPool& thread_pool, TimingLogger& timings) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+  // TODO: we could resolve strings here, although the string table is largely filled with class
+  //       and method names.
+
+  ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
+  context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
+  timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types");
+
+  context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
+  timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields");
+}
+
+void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                            ThreadPool& thread_pool, TimingLogger& timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != NULL);
+    VerifyDexFile(class_loader, *dex_file, thread_pool, timings);
+  }
+}
+
+static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index)
+    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  ScopedObjectAccess soa(Thread::Current());
+  const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index);
+  const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def);
+  mirror::Class* klass =
+      manager->GetClassLinker()->FindClass(descriptor,
+                                           soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()));
+  if (klass == NULL) {
+    CHECK(soa.Self()->IsExceptionPending());
+    soa.Self()->ClearException();
+
+    /*
+     * At compile time, we can still structurally verify the class even if FindClass fails.
+     * This is to ensure the class is structurally sound for compilation. An unsound class
+     * will be rejected by the verifier and later skipped during compilation.
+     */
+    mirror::DexCache* dex_cache = manager->GetClassLinker()->FindDexCache(*manager->GetDexFile());
+    std::string error_msg;
+    if (verifier::MethodVerifier::VerifyClass(manager->GetDexFile(),
+                                              dex_cache,
+                                              soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()),
+                                              class_def_index, error_msg, true) ==
+                                                  verifier::MethodVerifier::kHardFailure) {
+      const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index);
+      LOG(ERROR) << "Verification failed on class "
+                 << PrettyDescriptor(manager->GetDexFile()->GetClassDescriptor(class_def))
+                 << " because: " << error_msg;
+    }
+    return;
+  }
+  CHECK(klass->IsResolved()) << PrettyClass(klass);
+  manager->GetClassLinker()->VerifyClass(klass);
+
+  if (klass->IsErroneous()) {
+    // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+    CHECK(soa.Self()->IsExceptionPending());
+    soa.Self()->ClearException();
+  }
+
+  CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
+      << PrettyDescriptor(klass) << ": state=" << klass->GetStatus();
+  soa.Self()->AssertNoPendingException();
+}
+
+void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
+                                   ThreadPool& thread_pool, TimingLogger& timings) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
+  context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
+  timings.AddSplit("Verify " + dex_file.GetLocation());
+}
+
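+// Classes in this list have static initializers that can't safely be run at compile time,
+// typically because they reach into native code, system properties, the file system or other
+// device state (see the per-entry notes); they are left to be initialized at runtime instead.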
+static const char* class_initializer_black_list[] = {
+  "Landroid/app/ActivityThread;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/bluetooth/BluetoothAudioGateway;", // Calls android.bluetooth.BluetoothAudioGateway.classInitNative().
+  "Landroid/bluetooth/HeadsetBase;", // Calls android.bluetooth.HeadsetBase.classInitNative().
+  "Landroid/content/res/CompatibilityInfo;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int.
+  "Landroid/content/res/CompatibilityInfo$1;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int.
+  "Landroid/content/UriMatcher;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/database/CursorWindow;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int.
+  "Landroid/database/sqlite/SQLiteConnection;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/database/sqlite/SQLiteConnection$Operation;", // Requires SimpleDateFormat -> java.util.Locale.
+  "Landroid/database/sqlite/SQLiteDatabaseConfiguration;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/database/sqlite/SQLiteDebug;", // Calls android.util.Log.isLoggable.
+  "Landroid/database/sqlite/SQLiteOpenHelper;", // Calls Class.getSimpleName -> Class.isAnonymousClass -> Class.getDex.
+  "Landroid/database/sqlite/SQLiteQueryBuilder;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/drm/DrmManagerClient;", // Calls System.loadLibrary.
+  "Landroid/graphics/drawable/AnimatedRotateDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/AnimationDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/BitmapDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/ClipDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/ColorDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/Drawable;", // Requires android.graphics.Rect.
+  "Landroid/graphics/drawable/DrawableContainer;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/GradientDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/LayerDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/NinePatchDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/RotateDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/ScaleDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/ShapeDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/StateListDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/drawable/TransitionDrawable;", // Sub-class of Drawable.
+  "Landroid/graphics/Matrix;", // Calls android.graphics.Matrix.native_create.
+  "Landroid/graphics/Matrix$1;", // Requires Matrix.
+  "Landroid/graphics/PixelFormat;", // Calls android.graphics.PixelFormat.nativeClassInit().
+  "Landroid/graphics/Rect;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/graphics/SurfaceTexture;", // Calls android.graphics.SurfaceTexture.nativeClassInit().
+  "Landroid/graphics/Typeface;", // Calls android.graphics.Typeface.nativeCreate.
+  "Landroid/inputmethodservice/ExtractEditText;", // Requires android.widget.TextView.
+  "Landroid/media/AmrInputStream;", // Calls OsConstants.initConstants.
+  "Landroid/media/CamcorderProfile;", // Calls OsConstants.initConstants.
+  "Landroid/media/CameraProfile;", // Calls System.loadLibrary.
+  "Landroid/media/DecoderCapabilities;", // Calls System.loadLibrary.
+  "Landroid/media/EncoderCapabilities;", // Calls OsConstants.initConstants.
+  "Landroid/media/ExifInterface;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaCodec;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaCodecList;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaCrypto;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaDrm;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaExtractor;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaFile;", // Requires DecoderCapabilities.
+  "Landroid/media/MediaMetadataRetriever;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaMuxer;", // Calls OsConstants.initConstants.
+  "Landroid/media/MediaPlayer;", // Calls System.loadLibrary.
+  "Landroid/media/MediaRecorder;", // Calls System.loadLibrary.
+  "Landroid/media/MediaScanner;", // Calls System.loadLibrary.
+  "Landroid/media/ResampleInputStream;", // Calls OsConstants.initConstants.
+  "Landroid/media/SoundPool;", // Calls OsConstants.initConstants.
+  "Landroid/media/videoeditor/MediaArtistNativeHelper;", // Calls OsConstants.initConstants.
+  "Landroid/media/videoeditor/VideoEditorProfile;", // Calls OsConstants.initConstants.
+  "Landroid/mtp/MtpDatabase;", // Calls OsConstants.initConstants.
+  "Landroid/mtp/MtpDevice;", // Calls OsConstants.initConstants.
+  "Landroid/mtp/MtpServer;", // Calls OsConstants.initConstants.
+  "Landroid/net/NetworkInfo;", // Calls java.util.EnumMap.<init> -> java.lang.Enum.getSharedConstants -> System.identityHashCode.
+  "Landroid/net/Proxy;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/net/SSLCertificateSocketFactory;", // Requires javax.net.ssl.HttpsURLConnection.
+  "Landroid/net/Uri;", // Calls Class.getSimpleName -> Class.isAnonymousClass -> Class.getDex.
+  "Landroid/net/Uri$AbstractHierarchicalUri;", // Requires Uri.
+  "Landroid/net/Uri$HierarchicalUri;", // Requires Uri.
+  "Landroid/net/Uri$OpaqueUri;", // Requires Uri.
+  "Landroid/net/Uri$StringUri;", // Requires Uri.
+  "Landroid/net/WebAddress;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/nfc/NdefRecord;", // Calls String.getBytes -> java.nio.charset.Charset.
+  "Landroid/opengl/EGL14;", // Calls android.opengl.EGL14._nativeClassInit.
+  "Landroid/opengl/GLES10;", // Calls android.opengl.GLES10._nativeClassInit.
+  "Landroid/opengl/GLES10Ext;", // Calls android.opengl.GLES10Ext._nativeClassInit.
+  "Landroid/opengl/GLES11;", // Requires GLES10.
+  "Landroid/opengl/GLES11Ext;", // Calls android.opengl.GLES11Ext._nativeClassInit.
+  "Landroid/opengl/GLES20;", // Calls android.opengl.GLES20._nativeClassInit.
+  "Landroid/opengl/GLUtils;", // Calls android.opengl.GLUtils.nativeClassInit.
+  "Landroid/os/Build;", // Calls -..-> android.os.SystemProperties.native_get.
+  "Landroid/os/Build$VERSION;", // Requires Build.
+  "Landroid/os/Debug;", // Requires android.os.Environment.
+  "Landroid/os/Environment;", // Calls System.getenv.
+  "Landroid/os/FileUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/os/StrictMode;", // Calls android.util.Log.isLoggable.
+  "Landroid/os/StrictMode$VmPolicy;", // Requires StrictMode.
+  "Landroid/os/Trace;", // Calls android.os.Trace.nativeGetEnabledTags.
+  "Landroid/os/UEventObserver;", // Calls Class.getSimpleName -> Class.isAnonymousClass -> Class.getDex.
+  "Landroid/provider/ContactsContract;", // Calls OsConstants.initConstants.
+  "Landroid/provider/Settings$Global;", // Calls OsConstants.initConstants.
+  "Landroid/provider/Settings$Secure;", // Requires android.net.Uri.
+  "Landroid/provider/Settings$System;", // Requires android.net.Uri.
+  "Landroid/renderscript/RenderScript;", // Calls System.loadLibrary.
+  "Landroid/server/BluetoothService;", // Calls android.server.BluetoothService.classInitNative.
+  "Landroid/server/BluetoothEventLoop;", // Calls android.server.BluetoothEventLoop.classInitNative.
+  "Landroid/telephony/PhoneNumberUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/telephony/TelephonyManager;", // Calls OsConstants.initConstants.
+  "Landroid/text/AutoText;", // Requires android.util.DisplayMetrics -..-> android.os.SystemProperties.native_get_int.
+  "Landroid/text/Layout;", // Calls com.android.internal.util.ArrayUtils.emptyArray -> System.identityHashCode.
+  "Landroid/text/BoringLayout;", // Requires Layout.
+  "Landroid/text/DynamicLayout;", // Requires Layout.
+  "Landroid/text/Html$HtmlParser;", // Calls -..-> String.toLowerCase -> java.util.Locale.
+  "Landroid/text/StaticLayout;", // Requires Layout.
+  "Landroid/text/TextUtils;", // Requires android.util.DisplayMetrics.
+  "Landroid/util/DisplayMetrics;", // Calls SystemProperties.native_get_int.
+  "Landroid/util/Patterns;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/view/Choreographer;", // Calls SystemProperties.native_get_boolean.
+  "Landroid/util/Patterns;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/view/GLES20Canvas;", // Calls GLES20Canvas.nIsAvailable().
+  "Landroid/view/GLES20RecordingCanvas;", // Requires android.view.GLES20Canvas.
+  "Landroid/view/GestureDetector;", // Calls android.view.GLES20Canvas.nIsAvailable.
+  "Landroid/view/HardwareRenderer$Gl20Renderer;", // Requires SystemProperties.native_get.
+  "Landroid/view/HardwareRenderer$GlRenderer;", // Requires SystemProperties.native_get.
+  "Landroid/view/InputEventConsistencyVerifier;", // Requires android.os.Build.
+  "Landroid/view/Surface;", // Requires SystemProperties.native_get.
+  "Landroid/view/SurfaceControl;", // Calls OsConstants.initConstants.
+  "Landroid/view/animation/AlphaAnimation;", // Requires Animation.
+  "Landroid/view/animation/Animation;", // Calls SystemProperties.native_get_boolean.
+  "Landroid/view/animation/AnimationSet;", // Calls OsConstants.initConstants.
+  "Landroid/view/textservice/SpellCheckerSubtype;", // Calls Class.getDex().
+  "Landroid/webkit/JniUtil;", // Calls System.loadLibrary.
+  "Landroid/webkit/PluginManager;", // // Calls OsConstants.initConstants.
+  "Landroid/webkit/WebViewCore;", // Calls System.loadLibrary.
+  "Landroid/webkit/WebViewFactory$Preloader;",  // Calls to Class.forName.
+  "Landroid/webkit/WebViewInputDispatcher;", // Calls Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/webkit/URLUtil;", // Calls Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Landroid/widget/AutoCompleteTextView;", // Requires TextView.
+  "Landroid/widget/Button;", // Requires TextView.
+  "Landroid/widget/CheckBox;", // Requires TextView.
+  "Landroid/widget/CheckedTextView;", // Requires TextView.
+  "Landroid/widget/CompoundButton;", // Requires TextView.
+  "Landroid/widget/EditText;", // Requires TextView.
+  "Landroid/widget/NumberPicker;", // Requires java.util.Locale.
+  "Landroid/widget/ScrollBarDrawable;", // Sub-class of Drawable.
+  "Landroid/widget/SearchView$SearchAutoComplete;", // Requires TextView.
+  "Landroid/widget/Switch;", // Requires TextView.
+  "Landroid/widget/TextView;", // Calls Paint.<init> -> Paint.native_init.
+  "Lcom/android/i18n/phonenumbers/AsYouTypeFormatter;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Lcom/android/i18n/phonenumbers/MetadataManager;", // Calls OsConstants.initConstants.
+  "Lcom/android/i18n/phonenumbers/PhoneNumberMatcher;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Lcom/android/i18n/phonenumbers/PhoneNumberUtil;", // Requires java.util.logging.LogManager.
+  "Lcom/android/i18n/phonenumbers/geocoding/AreaCodeMap;", // Calls OsConstants.initConstants.
+  "Lcom/android/i18n/phonenumbers/geocoding/PhoneNumberOfflineGeocoder;", // Calls OsConstants.initConstants.
+  "Lcom/android/internal/os/SamplingProfilerIntegration;", // Calls SystemProperties.native_get_int.
+  "Lcom/android/internal/policy/impl/PhoneWindow;", // Calls android.os.Binder.init.
+  "Lcom/android/internal/view/menu/ActionMenuItemView;", // Requires TextView.
+  "Lcom/android/internal/widget/DialogTitle;", // Requires TextView.
+  "Lcom/android/org/bouncycastle/asn1/StreamUtil;", // Calls Runtime.getRuntime().maxMemory().
+  "Lcom/android/org/bouncycastle/asn1/pkcs/MacData;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/asn1/pkcs/RSASSAPSSparams;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/asn1/cms/SignedData;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/asn1/x509/GeneralSubtree;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/asn1/x9/X9ECParameters;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$MD5;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA1;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA256;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA384;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/bouncycastle/crypto/digests/OpenSSLDigest$SHA512;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/bouncycastle/crypto/engines/RSABlindedEngine;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/crypto/generators/DHKeyGeneratorHelper;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/crypto/generators/DHParametersGenerator;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/crypto/generators/DHParametersHelper;", // Calls System.getenv -> OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/crypto/generators/DSAKeyPairGenerator;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/crypto/generators/DSAParametersGenerator;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/crypto/generators/RSAKeyPairGenerator;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/dh/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/dsa/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$EC;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECDH;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECDHC;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECDSA;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi$ECMQV;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/ec/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/rsa/BCRSAPrivateCrtKey;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/rsa/BCRSAPrivateKey;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/jcajce/provider/asymmetric/rsa/KeyPairGeneratorSpi;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jcajce/provider/keystore/pkcs12/PKCS12KeyStoreSpi$BCPKCS12KeyStore;", // Calls Thread.currentThread.
+  "Lcom/android/org/bouncycastle/jcajce/provider/keystore/pkcs12/PKCS12KeyStoreSpi;", // Calls Thread.currentThread.
+  "Lcom/android/org/bouncycastle/jce/PKCS10CertificationRequest;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/jce/provider/CertBlacklist;", // Calls System.getenv -> OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/jce/provider/JCERSAPrivateCrtKey;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/jce/provider/JCERSAPrivateKey;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/jce/provider/PKIXCertPathValidatorSpi;", // Calls System.getenv -> OsConstants.initConstants.
+  "Lcom/android/org/bouncycastle/math/ec/ECConstants;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/math/ec/Tnaf;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/util/BigIntegers;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/bouncycastle/x509/X509Util;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lcom/android/org/conscrypt/CipherSuite;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/FileClientSessionCache$CacheFile;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/HandshakeIODataStream;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/Logger;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/NativeCrypto;", // Calls native NativeCrypto.clinit().
+  "Lcom/android/org/conscrypt/OpenSSLECKeyPairGenerator;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/OpenSSLEngine;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/conscrypt/OpenSSLMac$HmacMD5;", // Calls native NativeCrypto.clinit().
+  "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA1;", // Calls native NativeCrypto.clinit().
+  "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA256;", // Calls native NativeCrypto.clinit().
+  "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA384;", // Calls native NativeCrypto.clinit().
+  "Lcom/android/org/conscrypt/OpenSSLMac$HmacSHA512;", // Calls native NativeCrypto.clinit().
+  "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$MD5;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA1;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA256;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA384;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/conscrypt/OpenSSLMessageDigestJDK$SHA512;", // Requires com.android.org.conscrypt.NativeCrypto.
+  "Lcom/android/org/conscrypt/OpenSSLX509CertPath;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/OpenSSLX509CertificateFactory;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/PRF;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/SSLSessionImpl;", // Calls OsConstants.initConstants.
+  "Lcom/android/org/conscrypt/TrustedCertificateStore;", // Calls System.getenv -> OsConstants.initConstants.
+  "Lcom/android/okhttp/ConnectionPool;", // Calls OsConstants.initConstants.
+  "Lcom/android/okhttp/OkHttpClient;", // Calls OsConstants.initConstants.
+  "Lcom/android/okhttp/internal/DiskLruCache;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Lcom/android/okhttp/internal/Util;", // Calls OsConstants.initConstants.
+  "Lcom/android/okhttp/internal/http/HttpsURLConnectionImpl;", // Calls VMClassLoader.getBootClassPathSize.
+  "Lcom/android/okhttp/internal/spdy/SpdyConnection;", // Calls OsConstants.initConstants.
+  "Lcom/android/okhttp/internal/spdy/SpdyReader;", // Calls OsConstants.initConstants.
+  "Lcom/android/okhttp/internal/tls/OkHostnameVerifier;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Lcom/google/android/gles_jni/EGLContextImpl;", // Calls com.google.android.gles_jni.EGLImpl._nativeClassInit.
+  "Lcom/google/android/gles_jni/EGLImpl;", // Calls com.google.android.gles_jni.EGLImpl._nativeClassInit.
+  "Lcom/google/android/gles_jni/GLImpl;", // Calls com.google.android.gles_jni.GLImpl._nativeClassInit.
+  "Lgov/nist/core/GenericObject;", // Calls OsConstants.initConstants.
+  "Lgov/nist/core/Host;", // Calls OsConstants.initConstants.
+  "Lgov/nist/core/HostPort;", // Calls OsConstants.initConstants.
+  "Lgov/nist/core/NameValue;", // Calls OsConstants.initConstants.
+  "Lgov/nist/core/net/DefaultNetworkLayer;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/Utils;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/AddressImpl;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/Authority;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/GenericURI;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/NetObject;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/SipUri;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/TelephoneNumber;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/address/UserInfo;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Accept;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AcceptEncoding;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AcceptLanguage;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AddressParametersHeader;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AlertInfoList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AllowEvents;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AllowEventsList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/AuthenticationInfo;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Authorization;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/CSeq;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/CallIdentifier;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Challenge;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ContactList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ContentEncoding;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ContentEncodingList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ContentLanguageList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ContentType;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Credentials;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ErrorInfoList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Expires;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/From;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/MimeVersion;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/NameMap;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Priority;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Protocol;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ProxyAuthenticate;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ProxyAuthenticateList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ProxyAuthorizationList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ProxyRequire;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ProxyRequireList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/RSeq;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/RecordRoute;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ReferTo;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/RequestLine;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Require;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/RetryAfter;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/SIPETag;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/SIPHeader;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/SIPHeaderNamesCache;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/StatusLine;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/SubscriptionState;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/TimeStamp;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/UserAgent;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Unsupported;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/Warning;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ViaList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/extensions/Join;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/extensions/References;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/extensions/Replaces;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PAccessNetworkInfo;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PAssertedIdentity;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PAssertedIdentityList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PAssociatedURI;",  // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PCalledPartyID;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PChargingVector;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PPreferredIdentity;",  // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PVisitedNetworkIDList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/PathList;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/SecurityAgree;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/SecurityClient;", // Calls OsConstants.initConstants.
+  "Lgov/nist/javax/sip/header/ims/ServiceRoute;", // Calls OsConstants.initConstants.
+  "Ljava/io/Console;", // Has FileDescriptor(s).
+  "Ljava/io/File;", // Calls to Random.<init> -> System.currentTimeMillis -> OsConstants.initConstants.
+  "Ljava/io/FileDescriptor;", // Requires libcore.io.OsConstants.
+  "Ljava/io/ObjectInputStream;", // Requires java.lang.ClassLoader$SystemClassLoader.
+  "Ljava/io/ObjectStreamClass;",  // Calls to Class.forName -> java.io.FileDescriptor.
+  "Ljava/io/ObjectStreamConstants;", // Instance of non-image class SerializablePermission.
+  "Ljava/lang/ClassLoader$SystemClassLoader;", // Calls System.getProperty -> OsConstants.initConstants.
+  "Ljava/lang/HexStringParser;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Ljava/lang/ProcessManager;", // Calls Thread.currentThread.
+  "Ljava/lang/Runtime;", // Calls System.getProperty -> OsConstants.initConstants.
+  "Ljava/lang/System;", // Calls OsConstants.initConstants.
+  "Ljava/math/BigDecimal;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Ljava/math/BigInteger;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Ljava/math/Primality;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Ljava/math/Multiplication;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Ljava/net/InetAddress;", // Requires libcore.io.OsConstants.
+  "Ljava/net/Inet4Address;", // Sub-class of InetAddress.
+  "Ljava/net/Inet6Address;", // Sub-class of InetAddress.
+  "Ljava/net/InetUnixAddress;", // Sub-class of InetAddress.
+  "Ljava/nio/charset/Charset;", // Calls Charset.getDefaultCharset -> System.getProperty -> OsConstants.initConstants.
+  "Ljava/nio/charset/CharsetICU;", // Sub-class of Charset.
+  "Ljava/nio/charset/Charsets;", // Calls Charset.forName.
+  "Ljava/nio/charset/StandardCharsets;", // Calls OsConstants.initConstants.
+  "Ljava/security/AlgorithmParameterGenerator;", // Calls OsConstants.initConstants.
+  "Ljava/security/KeyPairGenerator$KeyPairGeneratorImpl;", // Calls OsConstants.initConstants.
+  "Ljava/security/KeyPairGenerator;", // Calls OsConstants.initConstants.
+  "Ljava/security/Security;", // Tries to do disk IO for "security.properties".
+  "Ljava/security/spec/RSAKeyGenParameterSpec;", // java.math.NativeBN.BN_new()
+  "Ljava/sql/Date;", // Calls OsConstants.initConstants.
+  "Ljava/sql/DriverManager;", // Calls OsConstants.initConstants.
+  "Ljava/sql/Time;", // Calls OsConstants.initConstants.
+  "Ljava/sql/Timestamp;", // Calls OsConstants.initConstants.
+  "Ljava/util/Date;", // Calls Date.<init> -> System.currentTimeMillis -> OsConstants.initConstants.
+  "Ljava/util/ListResourceBundle;", // Calls OsConstants.initConstants.
+  "Ljava/util/Locale;", // Calls System.getProperty -> OsConstants.initConstants.
+  "Ljava/util/PropertyResourceBundle;", // Calls OsConstants.initConstants.
+  "Ljava/util/ResourceBundle;", // Calls OsConstants.initConstants.
+  "Ljava/util/ResourceBundle$MissingBundle;", // Calls OsConstants.initConstants.
+  "Ljava/util/Scanner;", // regex.Pattern.compileImpl.
+  "Ljava/util/SimpleTimeZone;", // Sub-class of TimeZone.
+  "Ljava/util/TimeZone;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+  "Ljava/util/concurrent/ConcurrentHashMap$Segment;", // Calls Runtime.getRuntime().availableProcessors().
+  "Ljava/util/concurrent/ConcurrentSkipListMap;", // Calls OsConstants.initConstants.
+  "Ljava/util/concurrent/Exchanger;", // Calls OsConstants.initConstants.
+  "Ljava/util/concurrent/ForkJoinPool;", // Calls OsConstants.initConstants.
+  "Ljava/util/concurrent/LinkedTransferQueue;", // Calls OsConstants.initConstants.
+  "Ljava/util/concurrent/Phaser;", // Calls OsConstants.initConstants.
+  "Ljava/util/concurrent/ScheduledThreadPoolExecutor;", // Calls AtomicLong.VMSupportsCS8()
+  "Ljava/util/concurrent/SynchronousQueue;", // Calls OsConstants.initConstants.
+  "Ljava/util/concurrent/atomic/AtomicLong;", // Calls AtomicLong.VMSupportsCS8()
+  "Ljava/util/logging/LogManager;", // Calls System.getProperty -> OsConstants.initConstants.
+  "Ljava/util/prefs/AbstractPreferences;", // Calls OsConstants.initConstants.
+  "Ljava/util/prefs/FilePreferencesImpl;", // Calls OsConstants.initConstants.
+  "Ljava/util/prefs/FilePreferencesFactoryImpl;", // Calls OsConstants.initConstants.
+  "Ljava/util/prefs/Preferences;", // Calls OsConstants.initConstants.
+  "Ljavax/crypto/KeyAgreement;", // Calls OsConstants.initConstants.
+  "Ljavax/crypto/KeyGenerator;", // Calls OsConstants.initConstants.
+  "Ljavax/security/cert/X509Certificate;", // Calls VMClassLoader.getBootClassPathSize.
+  "Ljavax/security/cert/X509Certificate$1;", // Calls VMClassLoader.getBootClassPathSize.
+  "Ljavax/microedition/khronos/egl/EGL10;", // Requires EGLContext.
+  "Ljavax/microedition/khronos/egl/EGLContext;", // Requires com.google.android.gles_jni.EGLImpl.
+  "Ljavax/net/ssl/HttpsURLConnection;", // Calls SSLSocketFactory.getDefault -> java.security.Security.getProperty.
+  "Ljavax/xml/datatype/DatatypeConstants;", // Calls OsConstants.initConstants.
+  "Ljavax/xml/datatype/FactoryFinder;", // Calls OsConstants.initConstants.
+  "Ljavax/xml/namespace/QName;", // Calls OsConstants.initConstants.
+  "Ljavax/xml/validation/SchemaFactoryFinder;", // Calls OsConstants.initConstants.
+  "Ljavax/xml/xpath/XPathConstants;", // Calls OsConstants.initConstants.
+  "Ljavax/xml/xpath/XPathFactoryFinder;", // Calls OsConstants.initConstants.
+  "Llibcore/icu/LocaleData;", // Requires java.util.Locale.
+  "Llibcore/icu/TimeZoneNames;", // Requires java.util.TimeZone.
+  "Llibcore/io/IoUtils;",  // Calls Random.<init> -> System.currentTimeMillis -> FileDescriptor -> OsConstants.initConstants.
+  "Llibcore/io/OsConstants;", // Platform specific.
+  "Llibcore/net/MimeUtils;", // Calls libcore.net.MimeUtils.getContentTypesPropertiesStream -> System.getProperty.
+  "Llibcore/reflect/Types;", // Calls OsConstants.initConstants.
+  "Llibcore/util/ZoneInfo;", // Sub-class of TimeZone.
+  "Llibcore/util/ZoneInfoDB;", // Calls System.getenv -> OsConstants.initConstants.
+  "Lorg/apache/commons/logging/LogFactory;", // Calls System.getProperty.
+  "Lorg/apache/commons/logging/impl/LogFactoryImpl;", // Calls OsConstants.initConstants.
+  "Lorg/apache/harmony/security/fortress/Services;", // Calls ClassLoader.getSystemClassLoader -> System.getProperty.
+  "Lorg/apache/harmony/security/provider/cert/X509CertFactoryImpl;", // Requires java.nio.charsets.Charsets.
+  "Lorg/apache/harmony/security/provider/crypto/RandomBitsSupplier;", // Requires java.io.File.
+  "Lorg/apache/harmony/security/utils/AlgNameMapper;", // Requires java.util.Locale.
+  "Lorg/apache/harmony/security/pkcs10/CertificationRequest;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/pkcs10/CertificationRequestInfo;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/pkcs7/AuthenticatedAttributes;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/pkcs7/SignedData;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/pkcs7/SignerInfo;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/pkcs8/PrivateKeyInfo;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/provider/crypto/SHA1PRNG_SecureRandomImpl;", // Calls OsConstants.initConstants.
+  "Lorg/apache/harmony/security/x501/AttributeTypeAndValue;", // Calls IntegralToString.convertInt -> Thread.currentThread.
+  "Lorg/apache/harmony/security/x501/DirectoryString;", // Requires BigInteger.
+  "Lorg/apache/harmony/security/x501/Name;", // Requires org.apache.harmony.security.x501.AttributeTypeAndValue.
+  "Lorg/apache/harmony/security/x509/AccessDescription;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/AuthorityKeyIdentifier;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/CRLDistributionPoints;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/Certificate;", // Requires org.apache.harmony.security.x509.TBSCertificate.
+  "Lorg/apache/harmony/security/x509/CertificateIssuer;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/CertificateList;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/DistributionPoint;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/DistributionPointName;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/EDIPartyName;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lorg/apache/harmony/security/x509/GeneralName;", // Requires org.apache.harmony.security.x501.Name.
+  "Lorg/apache/harmony/security/x509/GeneralNames;", // Requires GeneralName.
+  "Lorg/apache/harmony/security/x509/GeneralSubtree;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/GeneralSubtrees;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/InfoAccessSyntax;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/IssuingDistributionPoint;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/NameConstraints;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/TBSCertList$RevokedCertificate;", // Calls NativeBN.BN_new().
+  "Lorg/apache/harmony/security/x509/TBSCertList;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/security/x509/TBSCertificate;",  // Requires org.apache.harmony.security.x501.Name.
+  "Lorg/apache/harmony/security/x509/Time;", // Calls native ... -> java.math.NativeBN.BN_new().
+  "Lorg/apache/harmony/security/x509/Validity;", // Requires x509.Time.
+  "Lorg/apache/harmony/security/x509/tsp/TSTInfo;", // Calls Thread.currentThread.
+  "Lorg/apache/harmony/xml/ExpatParser;", // Calls native ExpatParser.staticInitialize.
+  "Lorg/apache/harmony/xml/ExpatParser$EntityParser;", // Calls ExpatParser.staticInitialize.
+  "Lorg/apache/http/conn/params/ConnRouteParams;", // Requires java.util.Locale.
+  "Lorg/apache/http/conn/ssl/SSLSocketFactory;", // Calls java.security.Security.getProperty.
+  "Lorg/apache/http/conn/util/InetAddressUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
+};
+
+static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
+    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index);
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
+  const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def);
+  mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor, class_loader);
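+  // When compiling the boot image the heap holds a single continuous space; only then, and only
+  // for classes that will be part of the image, may static fields be initialized at compile time.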
+  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
+  bool can_init_static_fields = compiling_boot &&
+      manager->GetCompiler()->IsImageClass(descriptor);
+  if (klass != NULL) {
+    // We don't want class initialization occurring on multiple threads due to deadlock problems.
+    // For example, a parent class is initialized (holding its lock) while its static/class
+    // initializer refers to a sub-class, causing it to try to acquire the sub-class' lock.
+    // Meanwhile, on a second thread, the sub-class is initialized (holding its lock) after first
+    // initializing its parents, whose locks are acquired. This leads to a parent-to-child and a
+    // child-to-parent lock ordering and consequent potential deadlock.
+    // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+    // than use a special Object for the purpose we use the Class of java.lang.Class.
+    ObjectLock lock1(soa.Self(), klass->GetClass());
+    // The lock required to initialize the class.
+    ObjectLock lock2(soa.Self(), klass);
+    // Only try to initialize classes that were successfully verified.
+    if (klass->IsVerified()) {
+      manager->GetClassLinker()->EnsureInitialized(klass, false, can_init_static_fields);
+      if (soa.Self()->IsExceptionPending()) {
+        soa.Self()->GetException(NULL)->Dump();
+      }
+      if (!klass->IsInitialized()) {
+        if (can_init_static_fields) {
+          bool is_black_listed = false;
+          for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) {
+            if (StringPiece(descriptor) == class_initializer_black_list[i]) {
+              is_black_listed = true;
+              break;
+            }
+          }
+          if (!is_black_listed) {
+            LOG(INFO) << "Initializing: " << descriptor;
+            if (StringPiece(descriptor) == "Ljava/lang/Void;"){
+              // Hand initialize j.l.Void to avoid Dex file operations in un-started runtime.
+              mirror::ObjectArray<mirror::Field>* fields = klass->GetSFields();
+              CHECK_EQ(fields->GetLength(), 1);
+              fields->Get(0)->SetObj(klass, manager->GetClassLinker()->FindPrimitiveClass('V'));
+              klass->SetStatus(mirror::Class::kStatusInitialized);
+            } else {
+              manager->GetClassLinker()->EnsureInitialized(klass, true, can_init_static_fields);
+            }
+            soa.Self()->AssertNoPendingException();
+          }
+        }
+      }
+      // If successfully initialized, place in the SSB array.
+      if (klass->IsInitialized()) {
+        klass->GetDexCache()->GetInitializedStaticStorage()->Set(klass->GetDexTypeIndex(), klass);
+      }
+    }
+    // Record the final class status if necessary.
+    mirror::Class::Status status = klass->GetStatus();
+    ClassReference ref(manager->GetDexFile(), class_def_index);
+    CompiledClass* compiled_class = manager->GetCompiler()->GetCompiledClass(ref);
+    if (compiled_class == NULL) {
+      compiled_class = new CompiledClass(status);
+      manager->GetCompiler()->RecordClassStatus(ref, compiled_class);
+    } else {
+      DCHECK_GE(status, compiled_class->GetStatus()) << descriptor;
+    }
+  }
+  // Clear any class not found or verification exceptions.
+  soa.Self()->ClearException();
+}
+
+void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
+                                       ThreadPool& thread_pool, TimingLogger& timings) {
+#ifndef NDEBUG
+  for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) {
+    const char* descriptor = class_initializer_black_list[i];
+    CHECK(IsValidDescriptor(descriptor)) << descriptor;
+  }
+#endif
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool);
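+  // Run InitializeClass over every class def in the dex file, in parallel on the thread pool.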
+  context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count_);
+  timings.AddSplit("InitializeNoClinit " + dex_file.GetLocation());
+}
+
+void CompilerDriver::InitializeClasses(jobject class_loader,
+                                       const std::vector<const DexFile*>& dex_files,
+                                       ThreadPool& thread_pool, TimingLogger& timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != NULL);
+    InitializeClasses(class_loader, *dex_file, thread_pool, timings);
+  }
+}
+
+void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                       ThreadPool& thread_pool, TimingLogger& timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != NULL);
+    CompileDexFile(class_loader, *dex_file, thread_pool, timings);
+  }
+}
+
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+  jobject jclass_loader = manager->GetClassLoader();
+  const DexFile& dex_file = *manager->GetDexFile();
+  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+    if (SkipClass(class_loader, dex_file, class_def)) {
+      return;
+    }
+  }
+  ClassReference ref(&dex_file, class_def_index);
+  // Skip compiling classes with generic verifier failures since they will still fail at runtime.
+  if (verifier::MethodVerifier::IsClassRejected(ref)) {
+    return;
+  }
+  const byte* class_data = dex_file.GetClassData(class_def);
+  if (class_data == NULL) {
+    // Empty class, probably a marker interface.
+    return;
+  }
+  // Can we run the DEX-to-DEX compiler on this class?
+  bool allow_dex_compilation;
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+    allow_dex_compilation = IsDexToDexCompilationAllowed(class_loader, dex_file, class_def);
+  }
+  ClassDataItemIterator it(dex_file, class_data);
+  // Skip fields
+  while (it.HasNextStaticField()) {
+    it.Next();
+  }
+  while (it.HasNextInstanceField()) {
+    it.Next();
+  }
+  // Compile direct methods
+  int64_t previous_direct_method_idx = -1;
+  while (it.HasNextDirectMethod()) {
+    uint32_t method_idx = it.GetMemberIndex();
+    if (method_idx == previous_direct_method_idx) {
+      // smali can create dex files with two encoded_methods sharing the same method_idx
+      // http://code.google.com/p/smali/issues/detail?id=119
+      it.Next();
+      continue;
+    }
+    previous_direct_method_idx = method_idx;
+    manager->GetCompiler()->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+                                          it.GetMethodInvokeType(class_def), class_def_index,
+                                          method_idx, jclass_loader, dex_file, allow_dex_compilation);
+    it.Next();
+  }
+  // Compile virtual methods
+  int64_t previous_virtual_method_idx = -1;
+  while (it.HasNextVirtualMethod()) {
+    uint32_t method_idx = it.GetMemberIndex();
+    if (method_idx == previous_virtual_method_idx) {
+      // smali can create dex files with two encoded_methods sharing the same method_idx
+      // http://code.google.com/p/smali/issues/detail?id=119
+      it.Next();
+      continue;
+    }
+    previous_virtual_method_idx = method_idx;
+    manager->GetCompiler()->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+                                          it.GetMethodInvokeType(class_def), class_def_index,
+                                          method_idx, jclass_loader, dex_file, allow_dex_compilation);
+    it.Next();
+  }
+  DCHECK(!it.HasNext());
+}
+
+void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
+                                    ThreadPool& thread_pool, TimingLogger& timings) {
+  ParallelCompilationManager context(NULL, class_loader, this, &dex_file, thread_pool);
+  context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
+  timings.AddSplit("Compile " + dex_file.GetLocation());
+}
+
+void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+                                   InvokeType invoke_type, uint32_t class_def_idx,
+                                   uint32_t method_idx, jobject class_loader,
+                                   const DexFile& dex_file,
+                                   bool allow_dex_to_dex_compilation) {
+  CompiledMethod* compiled_method = NULL;
+  uint64_t start_ns = NanoTime();
+
+  if ((access_flags & kAccNative) != 0) {
+    compiled_method = (*jni_compiler_)(*this, access_flags, method_idx, dex_file);
+    CHECK(compiled_method != NULL);
+  } else if ((access_flags & kAccAbstract) != 0) {
+  } else {
+    // In small mode we only compile image classes.
+    bool dont_compile = (Runtime::Current()->IsSmallMode() &&
+                         ((image_classes_.get() == NULL) || (image_classes_->size() == 0)));
+
+    // Don't compile class initializers, ever.
+    if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) {
+      dont_compile = true;
+    } else if (code_item->insns_size_in_code_units_ < Runtime::Current()->GetSmallModeMethodDexSizeLimit()) {
+      // Do compile small methods.
+      dont_compile = false;
+    }
+    if (!dont_compile) {
+      CompilerFn compiler = compiler_;
+#ifdef ART_SEA_IR_MODE
+      bool use_sea = Runtime::Current()->IsSeaIRMode();
+      use_sea = use_sea && (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+      if (use_sea) {
+        compiler = sea_ir_compiler_;
+      }
+#endif
+      compiled_method = (*compiler)(*this, code_item, access_flags, invoke_type, class_def_idx,
+                                    method_idx, class_loader, dex_file);
+      CHECK(compiled_method != NULL) << PrettyMethod(method_idx, dex_file);
+    } else if (allow_dex_to_dex_compilation) {
+      // TODO: add a mode to disable DEX-to-DEX compilation?
+      compiled_method = (*dex_to_dex_compiler_)(*this, code_item, access_flags,
+                                                invoke_type, class_def_idx,
+                                                method_idx, class_loader, dex_file);
+      // No native code is generated.
+      CHECK(compiled_method == NULL) << PrettyMethod(method_idx, dex_file);
+    }
+  }
+  uint64_t duration_ns = NanoTime() - start_ns;
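+  // Warn about unusually slow compilations; the portable (LLVM) backend is slower, so it gets a
+  // larger warning threshold.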
+#ifdef ART_USE_PORTABLE_COMPILER
+  const uint64_t kWarnMilliSeconds = 1000;
+#else
+  const uint64_t kWarnMilliSeconds = 100;
+#endif
+  if (duration_ns > MsToNs(kWarnMilliSeconds)) {
+    LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file)
+                 << " took " << PrettyDuration(duration_ns);
+  }
+
+  Thread* self = Thread::Current();
+  if (compiled_method != NULL) {
+    MethodReference ref(&dex_file, method_idx);
+    CHECK(GetCompiledMethod(ref) == NULL) << PrettyMethod(method_idx, dex_file);
+    {
+      MutexLock mu(self, compiled_methods_lock_);
+      compiled_methods_.Put(ref, compiled_method);
+    }
+    DCHECK(GetCompiledMethod(ref) != NULL) << PrettyMethod(method_idx, dex_file);
+  }
+
+  if (self->IsExceptionPending()) {
+    ScopedObjectAccess soa(self);
+    LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n"
+        << self->GetException(NULL)->Dump();
+  }
+}
+
+CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
+  MutexLock mu(Thread::Current(), compiled_classes_lock_);
+  ClassTable::const_iterator it = compiled_classes_.find(ref);
+  if (it == compiled_classes_.end()) {
+    return NULL;
+  }
+  CHECK(it->second != NULL);
+  return it->second;
+}
+
+CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
+  MutexLock mu(Thread::Current(), compiled_methods_lock_);
+  MethodTable::const_iterator it = compiled_methods_.find(ref);
+  if (it == compiled_methods_.end()) {
+    return NULL;
+  }
+  CHECK(it->second != NULL);
+  return it->second;
+}
+
+void CompilerDriver::SetBitcodeFileName(std::string const& filename) {
+  typedef void (*SetBitcodeFileNameFn)(CompilerDriver&, std::string const&);
+
+  SetBitcodeFileNameFn set_bitcode_file_name =
+    reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
+
+  set_bitcode_file_name(*this, filename);
+}
+
+
+void CompilerDriver::AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
+                                             size_t class_def_index) {
+  MutexLock mu(self, freezing_constructor_lock_);
+  freezing_constructor_classes_.insert(ClassReference(dex_file, class_def_index));
+}
+
+bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
+                                          size_t class_def_index) {
+  MutexLock mu(self, freezing_constructor_lock_);
+  return freezing_constructor_classes_.count(ClassReference(dex_file, class_def_index)) != 0;
+}
+
+bool CompilerDriver::WriteElf(const std::string& android_root,
+                              bool is_host,
+                              const std::vector<const art::DexFile*>& dex_files,
+                              std::vector<uint8_t>& oat_contents,
+                              art::File* file)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(ART_USE_PORTABLE_COMPILER)
+  return art::ElfWriterMclinker::Create(file, oat_contents, dex_files, android_root, is_host, *this);
+#else
+  return art::ElfWriterQuick::Create(file, oat_contents, dex_files, android_root, is_host, *this);
+#endif
+}
+void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set,
+                                                std::string& target_triple,
+                                                std::string& target_cpu,
+                                                std::string& target_attr) {
+  switch (instruction_set) {
+    case kThumb2:
+      target_triple = "thumb-none-linux-gnueabi";
+      target_cpu = "cortex-a9";
+      target_attr = "+thumb2,+neon,+neonfp,+vfp3,+db";
+      break;
+
+    case kArm:
+      target_triple = "armv7-none-linux-gnueabi";
+      // TODO: Fix for Nexus S.
+      target_cpu = "cortex-a9";
+      // TODO: Fix for Xoom.
+      target_attr = "+v7,+neon,+neonfp,+vfp3,+db";
+      break;
+
+    case kX86:
+      target_triple = "i386-pc-linux-gnu";
+      target_attr = "";
+      break;
+
+    case kMips:
+      target_triple = "mipsel-unknown-linux";
+      target_attr = "mips32r2";
+      break;
+
+    default:
+      LOG(FATAL) << "Unknown instruction set: " << instruction_set;
+  }
+}
+}  // namespace art
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
new file mode 100644
index 0000000..d37f494
--- /dev/null
+++ b/compiler/driver/compiler_driver.h
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DRIVER_COMPILER_DRIVER_H_
+#define ART_SRC_COMPILER_DRIVER_COMPILER_DRIVER_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/mutex.h"
+#include "class_reference.h"
+#include "compiled_class.h"
+#include "compiled_method.h"
+#include "dex_file.h"
+#include "instruction_set.h"
+#include "invoke_type.h"
+#include "method_reference.h"
+#include "oat_file.h"
+#include "runtime.h"
+#include "safe_map.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class AOTCompilationStats;
+class ParallelCompilationManager;
+class DexCompilationUnit;
+class TimingLogger;
+
+enum CompilerBackend {
+  kQuick,
+  kPortable,
+  kNoBackend
+};
+
+// Thread-local storage for compiler worker threads.
+class CompilerTls {
+  public:
+    CompilerTls() : llvm_info_(NULL) {}
+    ~CompilerTls() {}
+
+    void* GetLLVMInfo() { return llvm_info_; }
+
+    void SetLLVMInfo(void* llvm_info) { llvm_info_ = llvm_info; }
+
+  private:
+    void* llvm_info_;
+};
+
+class CompilerDriver {
+ public:
+  typedef std::set<std::string> DescriptorSet;
+
+  // Create a compiler targeting the requested "instruction_set".
+  // "image" should be true if image specific optimizations should be
+  // enabled.  "image_classes" lets the compiler know what classes it
+  // can assume will be in the image, with NULL implying all available
+  // classes.
+  explicit CompilerDriver(CompilerBackend compiler_backend, InstructionSet instruction_set,
+                          bool image, DescriptorSet* image_classes,
+                          size_t thread_count, bool support_debugging,
+                          bool dump_stats, bool dump_timings);
+
+  ~CompilerDriver();
+
+  void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Compile a single Method
+  void CompileOne(const mirror::AbstractMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool IsDebuggingSupported() {
+    return support_debugging_;
+  }
+
+  InstructionSet GetInstructionSet() const {
+    return instruction_set_;
+  }
+
+  CompilerBackend GetCompilerBackend() const {
+    return compiler_backend_;
+  }
+
+  bool IsImage() const {
+    return image_;
+  }
+
+  DescriptorSet* GetImageClasses() const {
+    return image_classes_.get();
+  }
+
+  CompilerTls* GetTls();
+
+  // Generate the trampolines that are invoked by unresolved direct methods.
+  const std::vector<uint8_t>* CreatePortableResolutionTrampoline() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateInterpreterToQuickEntry() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  CompiledClass* GetCompiledClass(ClassReference ref) const
+      LOCKS_EXCLUDED(compiled_classes_lock_);
+
+  CompiledMethod* GetCompiledMethod(MethodReference ref) const
+      LOCKS_EXCLUDED(compiled_methods_lock_);
+
+  void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file, size_t class_def_index);
+  bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, size_t class_def_index);
+
+  // Callbacks from compiler to see what runtime checks must be generated.
+
+  bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Are runtime access checks necessary in the compiled code?
+  bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
+                                  uint32_t type_idx, bool* type_known_final = NULL,
+                                  bool* type_known_abstract = NULL,
+                                  bool* equals_referrers_class = NULL)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Are runtime access and instantiable checks necessary in the code?
+  bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
+                                              uint32_t type_idx)
+     LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Can we fast path instance field access? Computes field's offset and volatility.
+  bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
+                                int& field_offset, bool& is_volatile, bool is_put)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Can we fast path static field access? Computes field's offset, volatility and whether the
+  // field is within the referrer (which can avoid checking class initialization).
+  bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
+                              int& field_offset, int& ssb_index,
+                              bool& is_referrers_class, bool& is_volatile, bool is_put)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Can we fast path an interface, super class or virtual method call? Computes method's vtable
+  // index.
+  bool ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
+                         InvokeType& type, MethodReference& target_method, int& vtable_idx,
+                         uintptr_t& direct_code, uintptr_t& direct_method, bool update_stats)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  bool IsSafeCast(const MethodReference& mr, uint32_t dex_pc);
+
+  // Record patch information for later fix up.
+  void AddCodePatch(const DexFile* dex_file,
+                    uint32_t referrer_method_idx,
+                    InvokeType referrer_invoke_type,
+                    uint32_t target_method_idx,
+                    InvokeType target_invoke_type,
+                    size_t literal_offset)
+      LOCKS_EXCLUDED(compiled_methods_lock_);
+  void AddMethodPatch(const DexFile* dex_file,
+                      uint32_t referrer_method_idx,
+                      InvokeType referrer_invoke_type,
+                      uint32_t target_method_idx,
+                      InvokeType target_invoke_type,
+                      size_t literal_offset)
+      LOCKS_EXCLUDED(compiled_methods_lock_);
+
+  void SetBitcodeFileName(std::string const& filename);
+
+  bool GetSupportBootImageFixup() const {
+    return support_boot_image_fixup_;
+  }
+
+  void SetSupportBootImageFixup(bool support_boot_image_fixup) {
+    support_boot_image_fixup_ = support_boot_image_fixup;
+  }
+
+
+  bool WriteElf(const std::string& android_root,
+                bool is_host,
+                const std::vector<const DexFile*>& dex_files,
+                std::vector<uint8_t>& oat_contents,
+                File* file);
+
+  // TODO: move to a common home for llvm helpers once quick/portable are merged
+  static void InstructionSetToLLVMTarget(InstructionSet instruction_set,
+                                         std::string& target_triple,
+                                         std::string& target_cpu,
+                                         std::string& target_attr);
+
+  void SetCompilerContext(void* compiler_context) {
+    compiler_context_ = compiler_context;
+  }
+
+  void* GetCompilerContext() const {
+    return compiler_context_;
+  }
+
+  size_t GetThreadCount() const {
+    return thread_count_;
+  }
+
+  class PatchInformation {
+   public:
+    const DexFile& GetDexFile() const {
+      return *dex_file_;
+    }
+    uint32_t GetReferrerMethodIdx() const {
+      return referrer_method_idx_;
+    }
+    InvokeType GetReferrerInvokeType() const {
+      return referrer_invoke_type_;
+    }
+    uint32_t GetTargetMethodIdx() const {
+      return target_method_idx_;
+    }
+    InvokeType GetTargetInvokeType() const {
+      return target_invoke_type_;
+    }
+    size_t GetLiteralOffset() const {
+      return literal_offset_;
+    }
+
+   private:
+    PatchInformation(const DexFile* dex_file,
+                     uint32_t referrer_method_idx,
+                     InvokeType referrer_invoke_type,
+                     uint32_t target_method_idx,
+                     InvokeType target_invoke_type,
+                     size_t literal_offset)
+      : dex_file_(dex_file),
+        referrer_method_idx_(referrer_method_idx),
+        referrer_invoke_type_(referrer_invoke_type),
+        target_method_idx_(target_method_idx),
+        target_invoke_type_(target_invoke_type),
+        literal_offset_(literal_offset) {
+      CHECK(dex_file_ != NULL);
+    }
+
+    const DexFile* dex_file_;
+    uint32_t referrer_method_idx_;
+    InvokeType referrer_invoke_type_;
+    uint32_t target_method_idx_;
+    InvokeType target_invoke_type_;
+    size_t literal_offset_;
+
+    friend class CompilerDriver;
+    DISALLOW_COPY_AND_ASSIGN(PatchInformation);
+  };
+
+  const std::vector<const PatchInformation*>& GetCodeToPatch() const {
+    return code_to_patch_;
+  }
+  const std::vector<const PatchInformation*>& GetMethodsToPatch() const {
+    return methods_to_patch_;
+  }
+
+  // Checks whether the class specified by the descriptor is one of the image_classes_.
+  bool IsImageClass(const char* descriptor) const;
+
+  void RecordClassStatus(ClassReference ref, CompiledClass* compiled_class);
+
+ private:
+  // Compute constant code and method pointers when possible
+  void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+                                     mirror::Class* referrer_class,
+                                     mirror::AbstractMethod* method,
+                                     uintptr_t& direct_code, uintptr_t& direct_method,
+                                     bool update_stats)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                  ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  void LoadImageClasses(TimingLogger& timings);
+
+  // Attempt to resolve all types, methods, fields, and strings
+  // referenced from code in the dex file following PathClassLoader
+  // ordering semantics.
+  void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+               ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
+                      ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+              ThreadPool& thread_pool, TimingLogger& timings);
+  void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
+                     ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                         ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void InitializeClasses(jobject class_loader, const DexFile& dex_file,
+                         ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
+
+  void UpdateImageClasses(TimingLogger& timings);
+  static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+               ThreadPool& thread_pool, TimingLogger& timings);
+  void CompileDexFile(jobject class_loader, const DexFile& dex_file,
+                      ThreadPool& thread_pool, TimingLogger& timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+                     InvokeType invoke_type, uint32_t class_def_idx, uint32_t method_idx,
+                     jobject class_loader, const DexFile& dex_file,
+                     bool allow_dex_to_dex_compilation)
+      LOCKS_EXCLUDED(compiled_methods_lock_);
+
+  static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  std::vector<const PatchInformation*> code_to_patch_;
+  std::vector<const PatchInformation*> methods_to_patch_;
+
+  CompilerBackend compiler_backend_;
+
+  InstructionSet instruction_set_;
+
+  // All class references that require a constructor barrier.
+  mutable Mutex freezing_constructor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::set<ClassReference> freezing_constructor_classes_ GUARDED_BY(freezing_constructor_lock_);
+
+  typedef SafeMap<const ClassReference, CompiledClass*> ClassTable;
+  // All class references that this compiler has compiled.
+  mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  ClassTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
+
+  typedef SafeMap<const MethodReference, CompiledMethod*, MethodReferenceComparator> MethodTable;
+  // All method references that this compiler has compiled.
+  mutable Mutex compiled_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_);
+
+  const bool image_;
+
+  // If image_ is true, specifies the classes that will be included in
+  // the image. Note if image_classes_ is NULL, all classes are
+  // included in the image.
+  UniquePtr<DescriptorSet> image_classes_;
+
+  size_t thread_count_;
+  bool support_debugging_;
+  uint64_t start_ns_;
+
+  UniquePtr<AOTCompilationStats> stats_;
+
+  bool dump_stats_;
+  bool dump_timings_;
+
+  typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
+  typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
+
+  void* compiler_library_;
+
+  typedef CompiledMethod* (*CompilerFn)(CompilerDriver& driver,
+                                        const DexFile::CodeItem* code_item,
+                                        uint32_t access_flags, InvokeType invoke_type,
+                                        uint32_t class_dex_idx, uint32_t method_idx,
+                                        jobject class_loader, const DexFile& dex_file);
+  CompilerFn compiler_;
+#ifdef ART_SEA_IR_MODE
+  CompilerFn sea_ir_compiler_;
+#endif
+
+  CompilerFn dex_to_dex_compiler_;
+
+  void* compiler_context_;
+
+  typedef CompiledMethod* (*JniCompilerFn)(CompilerDriver& driver,
+                                           uint32_t access_flags, uint32_t method_idx,
+                                           const DexFile& dex_file);
+  JniCompilerFn jni_compiler_;
+
+  pthread_key_t tls_key_;
+
+  typedef void (*CompilerEnableAutoElfLoadingFn)(CompilerDriver& driver);
+  CompilerEnableAutoElfLoadingFn compiler_enable_auto_elf_loading_;
+
+  typedef const void* (*CompilerGetMethodCodeAddrFn)
+      (const CompilerDriver& driver, const CompiledMethod* cm, const mirror::AbstractMethod* method);
+  CompilerGetMethodCodeAddrFn compiler_get_method_code_addr_;
+
+  bool support_boot_image_fixup_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DRIVER_COMPILER_DRIVER_H_
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
new file mode 100644
index 0000000..6a160f7
--- /dev/null
+++ b/compiler/driver/compiler_driver_test.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "driver/compiler_driver.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include "UniquePtr.h"
+#include "class_linker.h"
+#include "common_test.h"
+#include "dex_file.h"
+#include "gc/heap.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+class CompilerDriverTest : public CommonTest {
+ protected:
+  void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
+    compiler_driver_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader));
+    MakeAllExecutable(class_loader);
+  }
+
+  void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
+                      const char* signature, bool is_virtual)
+      LOCKS_EXCLUDED(Locks::mutator_lock_) {
+    CompileAll(class_loader);
+    Thread::Current()->TransitionFromSuspendedToRunnable();
+    bool started = runtime_->Start();
+    CHECK(started);
+    env_ = Thread::Current()->GetJniEnv();
+    class_ = env_->FindClass(class_name);
+    CHECK(class_ != NULL) << "Class not found: " << class_name;
+    if (is_virtual) {
+      mid_ = env_->GetMethodID(class_, method, signature);
+    } else {
+      mid_ = env_->GetStaticMethodID(class_, method, signature);
+    }
+    CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+  }
+
+  void MakeAllExecutable(jobject class_loader) {
+    const std::vector<const DexFile*>& class_path
+        = Runtime::Current()->GetCompileTimeClassPath(class_loader);
+    for (size_t i = 0; i != class_path.size(); ++i) {
+      const DexFile* dex_file = class_path[i];
+      CHECK(dex_file != NULL);
+      MakeDexFileExecutable(class_loader, *dex_file);
+    }
+  }
+
+  void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) {
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
+      const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+      const char* descriptor = dex_file.GetClassDescriptor(class_def);
+      ScopedObjectAccess soa(Thread::Current());
+      mirror::Class* c = class_linker->FindClass(descriptor, soa.Decode<mirror::ClassLoader*>(class_loader));
+      CHECK(c != NULL);
+      for (size_t i = 0; i < c->NumDirectMethods(); i++) {
+        MakeExecutable(c->GetDirectMethod(i));
+      }
+      for (size_t i = 0; i < c->NumVirtualMethods(); i++) {
+        MakeExecutable(c->GetVirtualMethod(i));
+      }
+    }
+  }
+
+  JNIEnv* env_;
+  jclass class_;
+  jmethodID mid_;
+};
+
+// Disabled due to 10 second runtime on host
+TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
+  CompileAll(NULL);
+
+  // All libcore references should resolve
+  ScopedObjectAccess soa(Thread::Current());
+  const DexFile* dex = java_lang_dex_file_;
+  mirror::DexCache* dex_cache = class_linker_->FindDexCache(*dex);
+  EXPECT_EQ(dex->NumStringIds(), dex_cache->NumStrings());
+  for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
+    const mirror::String* string = dex_cache->GetResolvedString(i);
+    EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+  }
+  EXPECT_EQ(dex->NumTypeIds(), dex_cache->NumResolvedTypes());
+  for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
+    mirror::Class* type = dex_cache->GetResolvedType(i);
+    EXPECT_TRUE(type != NULL) << "type_idx=" << i
+                              << " " << dex->GetTypeDescriptor(dex->GetTypeId(i));
+  }
+  EXPECT_EQ(dex->NumMethodIds(), dex_cache->NumResolvedMethods());
+  for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
+    mirror::AbstractMethod* method = dex_cache->GetResolvedMethod(i);
+    EXPECT_TRUE(method != NULL) << "method_idx=" << i
+                                << " " << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i))
+                                << " " << dex->GetMethodName(dex->GetMethodId(i));
+    EXPECT_TRUE(method->GetEntryPointFromCompiledCode() != NULL) << "method_idx=" << i
+                                           << " "
+                                           << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i))
+                                           << " " << dex->GetMethodName(dex->GetMethodId(i));
+  }
+  EXPECT_EQ(dex->NumFieldIds(), dex_cache->NumResolvedFields());
+  for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
+    mirror::Field* field = dex_cache->GetResolvedField(i);
+    EXPECT_TRUE(field != NULL) << "field_idx=" << i
+                               << " " << dex->GetFieldDeclaringClassDescriptor(dex->GetFieldId(i))
+                               << " " << dex->GetFieldName(dex->GetFieldId(i));
+  }
+
+  // TODO check Class::IsVerified for all classes
+
+  // TODO: check that all Method::GetCode() values are non-null
+}
+
+TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
+  TEST_DISABLED_FOR_PORTABLE();
+  jobject class_loader;
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    CompileVirtualMethod(NULL, "java.lang.Class", "isFinalizable", "()Z");
+    CompileDirectMethod(NULL, "java.lang.Object", "<init>", "()V");
+    class_loader = LoadDex("AbstractMethod");
+  }
+  ASSERT_TRUE(class_loader != NULL);
+  EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
+
+  // Create a jobj_ of ConcreteClass, NOT AbstractClass.
+  jclass c_class = env_->FindClass("ConcreteClass");
+  jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
+  jobject jobj_ = env_->NewObject(c_class, constructor);
+  ASSERT_TRUE(jobj_ != NULL);
+
+  // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
+  env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
+  EXPECT_EQ(env_->ExceptionCheck(), JNI_TRUE);
+  jthrowable exception = env_->ExceptionOccurred();
+  env_->ExceptionClear();
+  jclass jlame = env_->FindClass("java/lang/AbstractMethodError");
+  EXPECT_TRUE(env_->IsInstanceOf(exception, jlame));
+  Thread::Current()->ClearException();
+}
+
+// TODO: need check-cast test (when stub complete & we can throw/catch
+
+}  // namespace art
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
new file mode 100644
index 0000000..6ba338a
--- /dev/null
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_compilation_unit.h"
+
+#include "base/stringprintf.h"
+#include "dex/compiler_ir.h"
+#include "dex/mir_graph.h"
+#include "utils.h"
+
+namespace art {
+
+DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu)
+    : cu_(cu),
+      class_loader_(cu->class_loader),
+      class_linker_(cu->class_linker),
+      dex_file_(cu->dex_file),
+      code_item_(cu->code_item),
+      class_def_idx_(cu->class_def_idx),
+      dex_method_idx_(cu->method_idx),
+      access_flags_(cu->access_flags) {
+}
+
+DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu,
+                                       jobject class_loader,
+                                       ClassLinker* class_linker,
+                                       const DexFile& dex_file,
+                                       const DexFile::CodeItem* code_item,
+                                       uint32_t class_def_idx,
+                                       uint32_t method_idx,
+                                       uint32_t access_flags)
+    : cu_(cu),
+      class_loader_(class_loader),
+      class_linker_(class_linker),
+      dex_file_(&dex_file),
+      code_item_(code_item),
+      class_def_idx_(class_def_idx),
+      dex_method_idx_(method_idx),
+      access_flags_(access_flags) {
+}
+
+const std::string& DexCompilationUnit::GetSymbol() {
+  if (symbol_.empty()) {
+    symbol_ = "dex_";
+    symbol_ += MangleForJni(PrettyMethod(dex_method_idx_, *dex_file_));
+  }
+  return symbol_;
+}
+
+}  // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
new file mode 100644
index 0000000..3c6129d
--- /dev/null
+++ b/compiler/driver/dex_compilation_unit.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_DEX_COMPILATION_UNIT_H_
+#define ART_SRC_COMPILER_DEX_DEX_COMPILATION_UNIT_H_
+
+#include <stdint.h>
+
+#include "dex_file.h"
+#include "jni.h"
+
+namespace art {
+namespace mirror {
+class ClassLoader;
+class DexCache;
+}  // namespace mirror
+class ClassLinker;
+struct CompilationUnit;
+
+class DexCompilationUnit {
+ public:
+  DexCompilationUnit(CompilationUnit* cu);
+
+  DexCompilationUnit(CompilationUnit* cu, jobject class_loader, ClassLinker* class_linker,
+                     const DexFile& dex_file, const DexFile::CodeItem* code_item,
+                     uint32_t class_def_idx, uint32_t method_idx, uint32_t access_flags);
+
+  CompilationUnit* GetCompilationUnit() const {
+    return cu_;
+  }
+
+  jobject GetClassLoader() const {
+    return class_loader_;
+  }
+
+  ClassLinker* GetClassLinker() const {
+    return class_linker_;
+  }
+
+  const DexFile* GetDexFile() const {
+    return dex_file_;
+  }
+
+  uint32_t GetClassDefIndex() const {
+    return class_def_idx_;
+  }
+
+  uint32_t GetDexMethodIndex() const {
+    return dex_method_idx_;
+  }
+
+  const DexFile::CodeItem* GetCodeItem() const {
+    return code_item_;
+  }
+
+  const char* GetShorty() const {
+    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    return dex_file_->GetMethodShorty(method_id);
+  }
+
+  const char* GetShorty(uint32_t* shorty_len) const {
+    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    return dex_file_->GetMethodShorty(method_id, shorty_len);
+  }
+
+  uint32_t GetAccessFlags() const {
+    return access_flags_;
+  }
+
+  bool IsNative() const {
+    return ((access_flags_ & kAccNative) != 0);
+  }
+
+  bool IsStatic() const {
+    return ((access_flags_ & kAccStatic) != 0);
+  }
+
+  bool IsSynchronized() const {
+    return ((access_flags_ & kAccSynchronized) != 0);
+  }
+
+  const std::string& GetSymbol();
+
+ private:
+  CompilationUnit* const cu_;
+
+  const jobject class_loader_;
+
+  ClassLinker* const class_linker_;
+
+  const DexFile* const dex_file_;
+
+  const DexFile::CodeItem* const code_item_;
+  const uint32_t class_def_idx_;
+  const uint32_t dex_method_idx_;
+  const uint32_t access_flags_;
+
+  std::string symbol_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_DEX_COMPILATION_UNIT_H_
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
new file mode 100644
index 0000000..127bc85
--- /dev/null
+++ b/compiler/elf_fixup.cc
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_fixup.h"
+
+#include "base/logging.h"
+#include "base/stringprintf.h"
+#include "elf_file.h"
+#include "elf_writer.h"
+#include "UniquePtr.h"
+
+namespace art {
+
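+// Set to true to log every individual fixup as it is applied.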
+static const bool DEBUG_FIXUP = false;
+
+bool ElfFixup::Fixup(File* file, uintptr_t oat_data_begin) {
+  UniquePtr<ElfFile> elf_file(ElfFile::Open(file, true, false));
+  CHECK(elf_file.get() != NULL);
+
+  // Lookup "oatdata" symbol address.
+  ::llvm::ELF::Elf32_Addr oatdata_address = ElfWriter::GetOatDataAddress(elf_file.get());
+  ::llvm::ELF::Elf32_Off base_address = oat_data_begin - oatdata_address;
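+  // base_address is the delta that must be added to every virtual address recorded in the ELF file.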
+
+  if (!FixupDynamic(*elf_file.get(), base_address)) {
+      LOG(WARNING) << "Failed fo fixup .dynamic in " << file->GetPath();
+      return false;
+  }
+  if (!FixupSectionHeaders(*elf_file.get(), base_address)) {
+      LOG(WARNING) << "Failed fo fixup section headers in " << file->GetPath();
+      return false;
+  }
+  if (!FixupProgramHeaders(*elf_file.get(), base_address)) {
+      LOG(WARNING) << "Failed fo fixup program headers in " << file->GetPath();
+      return false;
+  }
+  if (!FixupSymbols(*elf_file.get(), base_address, true)) {
+      LOG(WARNING) << "Failed fo fixup .dynsym in " << file->GetPath();
+      return false;
+  }
+  if (!FixupSymbols(*elf_file.get(), base_address, false)) {
+      LOG(WARNING) << "Failed fo fixup .symtab in " << file->GetPath();
+      return false;
+  }
+  if (!FixupRelocations(*elf_file.get(), base_address)) {
+      LOG(WARNING) << "Failed fo fixup .rel.dyn in " << file->GetPath();
+      return false;
+  }
+  return true;
+}
+
+// MIPS seems to break the d_val vs d_ptr rules even though its values are between DT_LOPROC and DT_HIPROC.
+#define DT_MIPS_RLD_VERSION  0x70000001 // d_val
+#define DT_MIPS_TIME_STAMP   0x70000002 // d_val
+#define DT_MIPS_ICHECKSUM    0x70000003 // d_val
+#define DT_MIPS_IVERSION     0x70000004 // d_val
+#define DT_MIPS_FLAGS        0x70000005 // d_val
+#define DT_MIPS_BASE_ADDRESS 0x70000006 // d_ptr
+#define DT_MIPS_CONFLICT     0x70000008 // d_ptr
+#define DT_MIPS_LIBLIST      0x70000009 // d_ptr
+#define DT_MIPS_LOCAL_GOTNO  0x7000000A // d_val
+#define DT_MIPS_CONFLICTNO   0x7000000B // d_val
+#define DT_MIPS_LIBLISTNO    0x70000010 // d_val
+#define DT_MIPS_SYMTABNO     0x70000011 // d_val
+#define DT_MIPS_UNREFEXTNO   0x70000012 // d_val
+#define DT_MIPS_GOTSYM       0x70000013 // d_val
+#define DT_MIPS_HIPAGENO     0x70000014 // d_val
+#define DT_MIPS_RLD_MAP      0x70000016 // d_ptr
+
+bool ElfFixup::FixupDynamic(ElfFile& elf_file, uintptr_t base_address) {
+  // TODO: C++0x auto.
+  for (::llvm::ELF::Elf32_Word i = 0; i < elf_file.GetDynamicNum(); i++) {
+    ::llvm::ELF::Elf32_Dyn& elf_dyn = elf_file.GetDynamic(i);
+    ::llvm::ELF::Elf32_Word d_tag = elf_dyn.d_tag;
+    bool elf_dyn_needs_fixup = false;
+    switch (d_tag) {
+      // case 1: well known d_tag values that imply Elf32_Dyn.d_un contains an address in d_ptr
+      case ::llvm::ELF::DT_PLTGOT:
+      case ::llvm::ELF::DT_HASH:
+      case ::llvm::ELF::DT_STRTAB:
+      case ::llvm::ELF::DT_SYMTAB:
+      case ::llvm::ELF::DT_RELA:
+      case ::llvm::ELF::DT_INIT:
+      case ::llvm::ELF::DT_FINI:
+      case ::llvm::ELF::DT_REL:
+      case ::llvm::ELF::DT_DEBUG:
+      case ::llvm::ELF::DT_JMPREL: {
+        elf_dyn_needs_fixup = true;
+        break;
+      }
+      // d_val or ignored values
+      case ::llvm::ELF::DT_NULL:
+      case ::llvm::ELF::DT_NEEDED:
+      case ::llvm::ELF::DT_PLTRELSZ:
+      case ::llvm::ELF::DT_RELASZ:
+      case ::llvm::ELF::DT_RELAENT:
+      case ::llvm::ELF::DT_STRSZ:
+      case ::llvm::ELF::DT_SYMENT:
+      case ::llvm::ELF::DT_SONAME:
+      case ::llvm::ELF::DT_RPATH:
+      case ::llvm::ELF::DT_SYMBOLIC:
+      case ::llvm::ELF::DT_RELSZ:
+      case ::llvm::ELF::DT_RELENT:
+      case ::llvm::ELF::DT_PLTREL:
+      case ::llvm::ELF::DT_TEXTREL:
+      case ::llvm::ELF::DT_BIND_NOW:
+      case ::llvm::ELF::DT_INIT_ARRAYSZ:
+      case ::llvm::ELF::DT_FINI_ARRAYSZ:
+      case ::llvm::ELF::DT_RUNPATH:
+      case ::llvm::ELF::DT_FLAGS: {
+        break;
+      }
+      // boundary values that should not be used
+      case ::llvm::ELF::DT_ENCODING:
+      case ::llvm::ELF::DT_LOOS:
+      case ::llvm::ELF::DT_HIOS:
+      case ::llvm::ELF::DT_LOPROC:
+      case ::llvm::ELF::DT_HIPROC: {
+        LOG(FATAL) << "Illegal d_tag value 0x" << std::hex << d_tag;
+        break;
+      }
+      default: {
+        // case 2: "regular" DT_* ranges where even d_tag values imply an address in d_ptr
+        if ((::llvm::ELF::DT_ENCODING  < d_tag && d_tag < ::llvm::ELF::DT_LOOS)
+            || (::llvm::ELF::DT_LOOS   < d_tag && d_tag < ::llvm::ELF::DT_HIOS)
+            || (::llvm::ELF::DT_LOPROC < d_tag && d_tag < ::llvm::ELF::DT_HIPROC)) {
+          // Special case for MIPS which breaks the regular rules between DT_LOPROC and DT_HIPROC
+          if (elf_file.GetHeader().e_machine == ::llvm::ELF::EM_MIPS) {
+            switch (d_tag) {
+              case DT_MIPS_RLD_VERSION:
+              case DT_MIPS_TIME_STAMP:
+              case DT_MIPS_ICHECKSUM:
+              case DT_MIPS_IVERSION:
+              case DT_MIPS_FLAGS:
+              case DT_MIPS_LOCAL_GOTNO:
+              case DT_MIPS_CONFLICTNO:
+              case DT_MIPS_LIBLISTNO:
+              case DT_MIPS_SYMTABNO:
+              case DT_MIPS_UNREFEXTNO:
+              case DT_MIPS_GOTSYM:
+              case DT_MIPS_HIPAGENO: {
+                break;
+              }
+              case DT_MIPS_BASE_ADDRESS:
+              case DT_MIPS_CONFLICT:
+              case DT_MIPS_LIBLIST:
+              case DT_MIPS_RLD_MAP: {
+                elf_dyn_needs_fixup = true;
+                break;
+              }
+              default: {
+                LOG(FATAL) << "Unknown MIPS d_tag value 0x" << std::hex << d_tag;
+                break;
+              }
+            }
+          } else if ((elf_dyn.d_tag % 2) == 0) {
+            elf_dyn_needs_fixup = true;
+          }
+        } else {
+          LOG(FATAL) << "Unknown d_tag value 0x" << std::hex << d_tag;
+        }
+        break;
+      }
+    }
+    if (elf_dyn_needs_fixup) {
+      uint32_t d_ptr = elf_dyn.d_un.d_ptr;
+      if (DEBUG_FIXUP) {
+        LOG(INFO) << StringPrintf("In %s moving Elf32_Dyn[%d] from 0x%08x to 0x%08x",
+                                  elf_file.GetFile().GetPath().c_str(), i,
+                                  d_ptr, d_ptr + base_address);
+      }
+      d_ptr += base_address;
+      elf_dyn.d_un.d_ptr = d_ptr;
+    }
+  }
+  return true;
+}
+
+bool ElfFixup::FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address) {
+  for (::llvm::ELF::Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
+    ::llvm::ELF::Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
+    // 0 implies that the section will not exist in the memory of the process
+    if (sh.sh_addr == 0) {
+      continue;
+    }
+    if (DEBUG_FIXUP) {
+      LOG(INFO) << StringPrintf("In %s moving Elf32_Shdr[%d] from 0x%08x to 0x%08x",
+                                elf_file.GetFile().GetPath().c_str(), i,
+                                sh.sh_addr, sh.sh_addr + base_address);
+    }
+    sh.sh_addr += base_address;
+  }
+  return true;
+}
+
+bool ElfFixup::FixupProgramHeaders(ElfFile& elf_file, uintptr_t base_address) {
+  // TODO: ELFObjectFile doesn't give access to Elf32_Phdr, so we do that ourselves for now.
+  for (::llvm::ELF::Elf32_Word i = 0; i < elf_file.GetProgramHeaderNum(); i++) {
+    ::llvm::ELF::Elf32_Phdr& ph = elf_file.GetProgramHeader(i);
+    CHECK_EQ(ph.p_vaddr, ph.p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
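+    // p_offset and p_vaddr must remain congruent modulo p_align, both before and after the shift.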
+    CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+            << elf_file.GetFile().GetPath() << " i=" << i;
+    if (DEBUG_FIXUP) {
+      LOG(INFO) << StringPrintf("In %s moving Elf32_Phdr[%d] from 0x%08x to 0x%08x",
+                                elf_file.GetFile().GetPath().c_str(), i,
+                                ph.p_vaddr, ph.p_vaddr + base_address);
+    }
+    ph.p_vaddr += base_address;
+    ph.p_paddr += base_address;
+    CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+            << elf_file.GetFile().GetPath() << " i=" << i;
+  }
+  return true;
+}
+
+bool ElfFixup::FixupSymbols(ElfFile& elf_file, uintptr_t base_address, bool dynamic) {
+  ::llvm::ELF::Elf32_Word section_type = dynamic ? ::llvm::ELF::SHT_DYNSYM : ::llvm::ELF::SHT_SYMTAB;
+  // TODO: Unfortunately ELFObjectFile has protected symbol access, so we use ElfFile instead.
+  ::llvm::ELF::Elf32_Shdr* symbol_section = elf_file.FindSectionByType(section_type);
+  if (symbol_section == NULL) {
+    // file is missing optional .symtab
+    CHECK(!dynamic) << elf_file.GetFile().GetPath();
+    return true;
+  }
+  for (uint32_t i = 0; i < elf_file.GetSymbolNum(*symbol_section); i++) {
+    ::llvm::ELF::Elf32_Sym& symbol = elf_file.GetSymbol(section_type, i);
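+    // Only symbols bound to an address (non-zero st_value) need to be relocated.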
+    if (symbol.st_value != 0) {
+      if (DEBUG_FIXUP) {
+        LOG(INFO) << StringPrintf("In %s moving Elf32_Sym[%d] from 0x%08x to 0x%08x",
+                                  elf_file.GetFile().GetPath().c_str(), i,
+                                  symbol.st_value, symbol.st_value + base_address);
+      }
+      symbol.st_value += base_address;
+    }
+  }
+  return true;
+}
+
+bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) {
+  for (llvm::ELF::Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
+    llvm::ELF::Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
+    if (sh.sh_type == llvm::ELF::SHT_REL) {
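+      // Shift each relocation's r_offset by the load base; the relocation type and symbol are unchanged.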
+      for (uint32_t i = 0; i < elf_file.GetRelNum(sh); i++) {
+        llvm::ELF::Elf32_Rel& rel = elf_file.GetRel(sh, i);
+        if (DEBUG_FIXUP) {
+          LOG(INFO) << StringPrintf("In %s moving Elf32_Rel[%d] from 0x%08x to 0x%08x",
+                                    elf_file.GetFile().GetPath().c_str(), i,
+                                    rel.r_offset, rel.r_offset + base_address);
+        }
+        rel.r_offset += base_address;
+      }
+    } else if (sh.sh_type == llvm::ELF::SHT_RELA) {
+      for (uint32_t i = 0; i < elf_file.GetRelaNum(sh); i++) {
+        llvm::ELF::Elf32_Rela& rela = elf_file.GetRela(sh, i);
+        if (DEBUG_FIXUP) {
+          LOG(INFO) << StringPrintf("In %s moving Elf32_Rela[%d] from 0x%08x to 0x%08x",
+                                    elf_file.GetFile().GetPath().c_str(), i,
+                                    rela.r_offset, rela.r_offset + base_address);
+        }
+        rela.r_offset += base_address;
+      }
+    }
+  }
+  return true;
+}
+
+}  // namespace art
diff --git a/compiler/elf_fixup.h b/compiler/elf_fixup.h
new file mode 100644
index 0000000..79c45c1
--- /dev/null
+++ b/compiler/elf_fixup.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ELF_FIXUP_H_
+#define ART_SRC_ELF_FIXUP_H_
+
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "os.h"
+
+namespace art {
+
+class ElfFile;
+
+class ElfFixup {
+ public:
+  // Fixup an ELF file so that the oat header will be loaded at oat_data_begin.
+  // Returns true on success, false on failure.
+  static bool Fixup(File* file, uintptr_t oat_data_begin);
+
+ private:
+  // Fixup .dynamic d_ptr values for the expected base_address.
+  static bool FixupDynamic(ElfFile& elf_file, uintptr_t base_address);
+
+  // Fixup Elf32_Shdr sh_addr to load at the desired address.
+  static bool FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address);
+
+  // Fixup Elf32_Phdr p_vaddr to load at the desired address.
+  static bool FixupProgramHeaders(ElfFile& elf_file, uintptr_t base_address);
+
+  // Fixup symbol table
+  static bool FixupSymbols(ElfFile& elf_file, uintptr_t base_address, bool dynamic);
+
+  // Fixup dynamic relocations
+  static bool FixupRelocations(ElfFile& elf_file, uintptr_t base_address);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ElfFixup);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ELF_FIXUP_H_
diff --git a/compiler/elf_stripper.cc b/compiler/elf_stripper.cc
new file mode 100644
index 0000000..7fc662c
--- /dev/null
+++ b/compiler/elf_stripper.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_stripper.h"
+
+#include <vector>
+
+#include <llvm/Support/ELF.h>
+
+#include "UniquePtr.h"
+#include "base/logging.h"
+#include "elf_file.h"
+#include "utils.h"
+
+namespace art {
+
+bool ElfStripper::Strip(File* file) {
+  UniquePtr<ElfFile> elf_file(ElfFile::Open(file, true, false));
+  CHECK(elf_file.get() != NULL);
+
+  // ELF files produced by MCLinker look roughly like this
+  //
+  // +------------+
+  // | Elf32_Ehdr | contains number of Elf32_Shdr and offset to first
+  // +------------+
+  // | Elf32_Phdr | program headers
+  // | Elf32_Phdr |
+  // | ...        |
+  // | Elf32_Phdr |
+  // +------------+
+  // | section    | mixture of needed and unneeded sections
+  // +------------+
+  // | section    |
+  // +------------+
+  // | ...        |
+  // +------------+
+  // | section    |
+  // +------------+
+  // | Elf32_Shdr | section headers
+  // | Elf32_Shdr |
+  // | ...        | contains offset to section start
+  // | Elf32_Shdr |
+  // +------------+
+  //
+  // To strip:
+  // - leave the Elf32_Ehdr and Elf32_Phdr values in place.
+  // - walk the sections making a new set of Elf32_Shdr section headers for what we want to keep
+  // - move the sections we are keeping up to fill in the gaps left by sections we want to strip
+  // - write new Elf32_Shdr section headers to end of file, updating Elf32_Ehdr
+  // - truncate rest of file
+  //
+
+  std::vector<llvm::ELF::Elf32_Shdr> section_headers;
+  std::vector<llvm::ELF::Elf32_Word> section_headers_original_indexes;
+  section_headers.reserve(elf_file->GetSectionHeaderNum());
+
+
+  llvm::ELF::Elf32_Shdr& string_section = elf_file->GetSectionNameStringSection();
+  for (llvm::ELF::Elf32_Word i = 0; i < elf_file->GetSectionHeaderNum(); i++) {
+    llvm::ELF::Elf32_Shdr& sh = elf_file->GetSectionHeader(i);
+    const char* name = elf_file->GetString(string_section, sh.sh_name);
+    if (name == NULL) {
+      CHECK_EQ(0U, i);
+      section_headers.push_back(sh);
+      section_headers_original_indexes.push_back(0);
+      continue;
+    }
+    if (StartsWith(name, ".debug")
+        || (strcmp(name, ".strtab") == 0)
+        || (strcmp(name, ".symtab") == 0)) {
+      continue;
+    }
+    section_headers.push_back(sh);
+    section_headers_original_indexes.push_back(i);
+  }
+  CHECK_NE(0U, section_headers.size());
+  CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
+
+  // section 0 is the NULL section, sections start at offset of first section
+  llvm::ELF::Elf32_Off offset = elf_file->GetSectionHeader(1).sh_offset;
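+  // Walk the kept sections in order, packing each at the next suitably aligned offset.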
+  for (size_t i = 1; i < section_headers.size(); i++) {
+    llvm::ELF::Elf32_Shdr& new_sh = section_headers[i];
+    llvm::ELF::Elf32_Shdr& old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
+    CHECK_EQ(new_sh.sh_name, old_sh.sh_name);
+    if (old_sh.sh_addralign > 1) {
+      offset = RoundUp(offset, old_sh.sh_addralign);
+    }
+    if (old_sh.sh_offset == offset) {
+      // already in place
+      offset += old_sh.sh_size;
+      continue;
+    }
+    // shift section earlier
+    memmove(elf_file->Begin() + offset,
+            elf_file->Begin() + old_sh.sh_offset,
+            old_sh.sh_size);
+    new_sh.sh_offset = offset;
+    offset += old_sh.sh_size;
+  }
+
+  llvm::ELF::Elf32_Off shoff = offset;
+  size_t section_headers_size_in_bytes = section_headers.size() * sizeof(llvm::ELF::Elf32_Shdr);
+  memcpy(elf_file->Begin() + offset, &section_headers[0], section_headers_size_in_bytes);
+  offset += section_headers_size_in_bytes;
+
+  elf_file->GetHeader().e_shnum = section_headers.size();
+  elf_file->GetHeader().e_shoff = shoff;
+  int result = ftruncate(file->Fd(), offset);
+  if (result != 0) {
+    PLOG(ERROR) << "Failed to truncate while stripping ELF file: " << file->GetPath();
+    return false;
+  }
+  return true;
+}
+
+}  // namespace art
diff --git a/compiler/elf_stripper.h b/compiler/elf_stripper.h
new file mode 100644
index 0000000..b202e6e
--- /dev/null
+++ b/compiler/elf_stripper.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ELF_STRIPPER_H_
+#define ART_SRC_ELF_STRIPPER_H_
+
+#include "base/macros.h"
+#include "os.h"
+
+namespace art {
+
+class ElfStripper {
+ public:
+  // Strip an ELF file of unneeded debugging information.
+  // Returns true on success, false on failure.
+  static bool Strip(File* file);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ElfStripper);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ELF_STRIPPER_H_
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
new file mode 100644
index 0000000..0823a53
--- /dev/null
+++ b/compiler/elf_writer.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_writer.h"
+
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "dex_method_iterator.h"
+#include "driver/compiler_driver.h"
+#include "elf_file.h"
+#include "invoke_type.h"
+#include "llvm/utils_llvm.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "oat.h"
+#include "oat_file.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+ElfWriter::ElfWriter(const CompilerDriver& driver, File* elf_file)
+  : compiler_driver_(&driver), elf_file_(elf_file) {}
+
+ElfWriter::~ElfWriter() {}
+
+llvm::ELF::Elf32_Addr ElfWriter::GetOatDataAddress(ElfFile* elf_file) {
+  llvm::ELF::Elf32_Addr oatdata_address = elf_file->FindSymbolAddress(llvm::ELF::SHT_DYNSYM,
+                                                                      "oatdata",
+                                                                      false);
+  CHECK_NE(0U, oatdata_address);
+  return oatdata_address;
+}
+
+void ElfWriter::GetOatElfInformation(File* file,
+                                     size_t& oat_loaded_size,
+                                     size_t& oat_data_offset) {
+  UniquePtr<ElfFile> elf_file(ElfFile::Open(file, false, false));
+  CHECK(elf_file.get() != NULL);
+
+  oat_loaded_size = elf_file->GetLoadedSize();
+  CHECK_NE(0U, oat_loaded_size);
+  oat_data_offset = GetOatDataAddress(elf_file.get());
+  CHECK_NE(0U, oat_data_offset);
+}
+
+}  // namespace art
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
new file mode 100644
index 0000000..7392e67
--- /dev/null
+++ b/compiler/elf_writer.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ELF_WRITER_H_
+#define ART_SRC_ELF_WRITER_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+#include <string>
+#include <vector>
+
+#include <llvm/Support/ELF.h>
+
+#include "base/macros.h"
+#include "os.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexFile;
+class ElfFile;
+
+class ElfWriter {
+ public:
+  // Looks up information about the location of the oat file within the elf file container.
+  // Used by ImageWriter to perform memory layout.
+  static void GetOatElfInformation(File* file,
+                                   size_t& oat_loaded_size,
+                                   size_t& oat_data_offset);
+
+  // Returns the runtime address of oat_data for an opened ElfFile.
+  static llvm::ELF::Elf32_Addr GetOatDataAddress(ElfFile* elf_file);
+
+ protected:
+  ElfWriter(const CompilerDriver& driver, File* elf_file);
+  ~ElfWriter();
+
+  virtual bool Write(std::vector<uint8_t>& oat_contents,
+                     const std::vector<const DexFile*>& dex_files,
+                     const std::string& android_root,
+                     bool is_host)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
+  // Setup by constructor
+  const CompilerDriver* compiler_driver_;
+  File* elf_file_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ELF_WRITER_H_
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
new file mode 100644
index 0000000..472a606
--- /dev/null
+++ b/compiler/elf_writer_mclinker.cc
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_writer_mclinker.h"
+
+#include <llvm/Support/TargetSelect.h>
+
+#include <mcld/Environment.h>
+#include <mcld/IRBuilder.h>
+#include <mcld/Linker.h>
+#include <mcld/LinkerConfig.h>
+#include <mcld/MC/ZOption.h>
+#include <mcld/Module.h>
+#include <mcld/Support/Path.h>
+#include <mcld/Support/TargetSelect.h>
+
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "dex_method_iterator.h"
+#include "driver/compiler_driver.h"
+#include "elf_file.h"
+#include "globals.h"
+#include "mirror/abstract_method.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+ElfWriterMclinker::ElfWriterMclinker(const CompilerDriver& driver, File* elf_file)
+  : ElfWriter(driver, elf_file), oat_input_(NULL) {}
+
+ElfWriterMclinker::~ElfWriterMclinker() {}
+
+bool ElfWriterMclinker::Create(File* elf_file,
+                               std::vector<uint8_t>& oat_contents,
+                               const std::vector<const DexFile*>& dex_files,
+                               const std::string& android_root,
+                               bool is_host,
+                               const CompilerDriver& driver) {
+  ElfWriterMclinker elf_writer(driver, elf_file);
+  return elf_writer.Write(oat_contents, dex_files, android_root, is_host);
+}
+
+bool ElfWriterMclinker::Write(std::vector<uint8_t>& oat_contents,
+                              const std::vector<const DexFile*>& dex_files,
+                              const std::string& android_root,
+                              bool is_host) {
+  Init();
+  AddOatInput(oat_contents);
+#if defined(ART_USE_PORTABLE_COMPILER)
+  AddMethodInputs(dex_files);
+  AddRuntimeInputs(android_root, is_host);
+#endif
+  if (!Link()) {
+    return false;
+  }
+#if defined(ART_USE_PORTABLE_COMPILER)
+  FixupOatMethodOffsets(dex_files);
+#endif
+  return true;
+}
+
+static void InitializeLLVM() {
+  // TODO: this is lifted from art's compiler_llvm.cc, should be factored out
+  if (kIsTargetBuild) {
+    llvm::InitializeNativeTarget();
+    // TODO: odd that there is no InitializeNativeTargetMC?
+  } else {
+    llvm::InitializeAllTargets();
+    llvm::InitializeAllTargetMCs();
+  }
+}
+
+void ElfWriterMclinker::Init() {
+  std::string target_triple;
+  std::string target_cpu;
+  std::string target_attr;
+  CompilerDriver::InstructionSetToLLVMTarget(compiler_driver_->GetInstructionSet(),
+                                             target_triple,
+                                             target_cpu,
+                                             target_attr);
+
+  // Based on mclinker's llvm-mcld.cpp main() and LinkerTest
+  //
+  // TODO: LinkerTest uses mcld::Initialize(), but it does an
+  // llvm::InitializeAllTargets, which we don't want. Basically we
+  // want mcld::InitializeNative, but it doesn't exist yet, so we
+  // inline the minimal we need here.
+  InitializeLLVM();
+  mcld::InitializeAllTargets();
+  mcld::InitializeAllLinkers();
+  mcld::InitializeAllEmulations();
+  mcld::InitializeAllDiagnostics();
+
+  linker_config_.reset(new mcld::LinkerConfig(target_triple));
+  CHECK(linker_config_.get() != NULL);
+  linker_config_->setCodeGenType(mcld::LinkerConfig::DynObj);
+  linker_config_->options().setSOName(elf_file_->GetPath());
+
+  // error on undefined symbols.
+  // TODO: should this just be set if kIsDebugBuild?
+  linker_config_->options().setNoUndefined(true);
+
+  if (compiler_driver_->GetInstructionSet() == kMips) {
+     // MCLinker defaults MIPS section alignment to 0x10000, not
+     // 0x1000.  The ABI says this is because the max page size in
+     // general is 64k, but that isn't true on Android.
+     mcld::ZOption z_option;
+     z_option.setKind(mcld::ZOption::MaxPageSize);
+     z_option.setPageSize(kPageSize);
+     linker_config_->options().addZOption(z_option);
+  }
+
+  // TODO: Wire up mcld DiagnosticEngine to LOG?
+  linker_config_->options().setColor(false);
+  if (false) {
+    // enables some tracing of input file processing
+    linker_config_->options().setTrace(true);
+  }
+
+  // Based on alone::Linker::config
+  module_.reset(new mcld::Module(linker_config_->options().soname()));
+  CHECK(module_.get() != NULL);
+  ir_builder_.reset(new mcld::IRBuilder(*module_.get(), *linker_config_.get()));
+  CHECK(ir_builder_.get() != NULL);
+  linker_.reset(new mcld::Linker());
+  CHECK(linker_.get() != NULL);
+  linker_->config(*linker_config_.get());
+}
+
+void ElfWriterMclinker::AddOatInput(std::vector<uint8_t>& oat_contents) {
+  // Add an artificial memory input. Based on LinkerTest.
+  UniquePtr<OatFile> oat_file(OatFile::OpenMemory(oat_contents, elf_file_->GetPath()));
+  CHECK(oat_file.get() != NULL) << elf_file_->GetPath();
+
+  const char* oat_data_start = reinterpret_cast<const char*>(&oat_file->GetOatHeader());
+  const size_t oat_data_length = oat_file->GetOatHeader().GetExecutableOffset();
+  const char* oat_code_start = oat_data_start + oat_data_length;
+  const size_t oat_code_length = oat_file->Size() - oat_data_length;
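+  // The oat image is split at its executable offset: bytes before it are data, the rest is code.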
+
+  // TODO: ownership of oat_input?
+  oat_input_ = ir_builder_->CreateInput("oat contents",
+                                        mcld::sys::fs::Path("oat contents path"),
+                                        mcld::Input::Object);
+  CHECK(oat_input_ != NULL);
+
+  // TODO: ownership of null_section?
+  mcld::LDSection* null_section = ir_builder_->CreateELFHeader(*oat_input_,
+                                                               "",
+                                                               mcld::LDFileFormat::Null,
+                                                               llvm::ELF::SHT_NULL,
+                                                               0);
+  CHECK(null_section != NULL);
+
+  // TODO: we should split readonly data from readonly executable
+  // code like .oat does.  We need to control section layout with
+  // linker script like functionality to guarantee references
+  // between sections maintain relative position which isn't
+  // possible right now with the mclinker APIs.
+  CHECK(oat_code_start != NULL);
+
+  // We need to ensure that oatdata is page aligned so that when we
+  // fix up the segment load addresses, they remain page aligned.
+  uint32_t alignment = kPageSize;
+
+  // TODO: ownership of text_section?
+  mcld::LDSection* text_section = ir_builder_->CreateELFHeader(*oat_input_,
+                                                               ".text",
+                                                               llvm::ELF::SHT_PROGBITS,
+                                                               llvm::ELF::SHF_EXECINSTR
+                                                               | llvm::ELF::SHF_ALLOC,
+                                                               alignment);
+  CHECK(text_section != NULL);
+
+  mcld::SectionData* text_sectiondata = ir_builder_->CreateSectionData(*text_section);
+  CHECK(text_sectiondata != NULL);
+
+  // TODO: why does IRBuilder::CreateRegion take a non-const pointer?
+  mcld::Fragment* text_fragment = ir_builder_->CreateRegion(const_cast<char*>(oat_data_start),
+                                                            oat_file->Size());
+  CHECK(text_fragment != NULL);
+  ir_builder_->AppendFragment(*text_fragment, *text_sectiondata);
+
+  ir_builder_->AddSymbol(*oat_input_,
+                         "oatdata",
+                         mcld::ResolveInfo::Object,
+                         mcld::ResolveInfo::Define,
+                         mcld::ResolveInfo::Global,
+                         oat_data_length,  // size
+                         0,                // offset
+                         text_section);
+
+  ir_builder_->AddSymbol(*oat_input_,
+                         "oatexec",
+                         mcld::ResolveInfo::Function,
+                         mcld::ResolveInfo::Define,
+                         mcld::ResolveInfo::Global,
+                         oat_code_length,  // size
+                         oat_data_length,  // offset
+                         text_section);
+
+  ir_builder_->AddSymbol(*oat_input_,
+                         "oatlastword",
+                         mcld::ResolveInfo::Object,
+                         mcld::ResolveInfo::Define,
+                         mcld::ResolveInfo::Global,
+                         0,                // size
+                         // subtract a word so symbol is within section
+                         (oat_data_length + oat_code_length) - sizeof(uint32_t),  // offset
+                         text_section);
+}
+
+#if defined(ART_USE_PORTABLE_COMPILER)
+void ElfWriterMclinker::AddMethodInputs(const std::vector<const DexFile*>& dex_files) {
+  DCHECK(oat_input_ != NULL);
+
+  DexMethodIterator it(dex_files);
+  while (it.HasNext()) {
+    const DexFile& dex_file = it.GetDexFile();
+    uint32_t method_idx = it.GetMemberIndex();
+    const CompiledMethod* compiled_method =
+      compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
+    if (compiled_method != NULL) {
+      AddCompiledCodeInput(*compiled_method);
+    }
+    it.Next();
+  }
+  added_symbols_.clear();
+}
+
+void ElfWriterMclinker::AddCompiledCodeInput(const CompiledCode& compiled_code) {
+  // Check if we've seen this compiled code before. If so, skip
+  // it. This can happen for reused code such as invoke stubs.
+  const std::string& symbol = compiled_code.GetSymbol();
+  SafeMap<const std::string*, const std::string*>::iterator it = added_symbols_.find(&symbol);
+  if (it != added_symbols_.end()) {
+    return;
+  }
+  added_symbols_.Put(&symbol, &symbol);
+
+  // Add input to supply code for symbol
+  const std::vector<uint8_t>& code = compiled_code.GetCode();
+  // TODO: ownership of code_input?
+  // TODO: why does IRBuilder::ReadInput take a non-const pointer?
+  mcld::Input* code_input = ir_builder_->ReadInput(symbol,
+                                                   const_cast<uint8_t*>(&code[0]),
+                                                   code.size());
+  CHECK(code_input != NULL);
+}
+
+void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool is_host) {
+  std::string libart_so(android_root);
+  libart_so += kIsDebugBuild ? "/lib/libartd.so" : "/lib/libart.so";
+  // TODO: ownership of libart_so_input?
+  mcld::Input* libart_so_input = ir_builder_->ReadInput(libart_so, libart_so);
+  CHECK(libart_so_input != NULL);
+
+  std::string host_prebuilt_dir("prebuilts/gcc/linux-x86/host/i686-linux-glibc2.7-4.6");
+
+  std::string compiler_runtime_lib;
+  if (is_host) {
+    compiler_runtime_lib += host_prebuilt_dir;
+    compiler_runtime_lib += "/lib/gcc/i686-linux/4.6.x-google/libgcc.a";
+  } else {
+    compiler_runtime_lib += android_root;
+    compiler_runtime_lib += "/lib/libcompiler_rt.a";
+  }
+  // TODO: ownership of compiler_runtime_lib_input?
+  mcld::Input* compiler_runtime_lib_input = ir_builder_->ReadInput(compiler_runtime_lib,
+                                                                   compiler_runtime_lib);
+  CHECK(compiler_runtime_lib_input != NULL);
+
+  std::string libc_lib;
+  if (is_host) {
+    libc_lib += host_prebuilt_dir;
+    libc_lib += "/sysroot/usr/lib/libc.so.6";
+  } else {
+    libc_lib += android_root;
+    libc_lib += "/lib/libc.so";
+  }
+  // TODO: ownership of libc_lib_input?
+  mcld::Input* libc_lib_input_input = ir_builder_->ReadInput(libc_lib, libc_lib);
+  CHECK(libc_lib_input_input != NULL);
+
+  std::string libm_lib;
+  if (is_host) {
+    libm_lib += host_prebuilt_dir;
+    libm_lib += "/sysroot/usr/lib/libm.so";
+  } else {
+    libm_lib += android_root;
+    libm_lib += "/lib/libm.so";
+  }
+  // TODO: ownership of libm_lib_input?
+  mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib);
+  CHECK(libm_lib_input_input != NULL);
+
+}
+#endif
+
+bool ElfWriterMclinker::Link() {
+  // link inputs
+  if (!linker_->link(*module_.get(), *ir_builder_.get())) {
+    LOG(ERROR) << "Failed to link " << elf_file_->GetPath();
+    return false;
+  }
+
+  // emit linked output
+  // TODO: avoid dup of fd by fixing Linker::emit to not close the argument fd.
+  int fd = dup(elf_file_->Fd());
+  if (fd == -1) {
+    PLOG(ERROR) << "Failed to dup file descriptor for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!linker_->emit(fd)) {
+    LOG(ERROR) << "Failed to emit " << elf_file_->GetPath();
+    return false;
+  }
+  mcld::Finalize();
+  LOG(INFO) << "ELF file written successfully: " << elf_file_->GetPath();
+  return true;
+}
+
+#if defined(ART_USE_PORTABLE_COMPILER)
+void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files) {
+  UniquePtr<ElfFile> elf_file(ElfFile::Open(elf_file_, true, false));
+  CHECK(elf_file.get() != NULL) << elf_file_->GetPath();
+
+  llvm::ELF::Elf32_Addr oatdata_address = GetOatDataAddress(elf_file.get());
+  DexMethodIterator it(dex_files);
+  while (it.HasNext()) {
+    const DexFile& dex_file = it.GetDexFile();
+    uint32_t method_idx = it.GetMemberIndex();
+    InvokeType invoke_type = it.GetInvokeType();
+    mirror::AbstractMethod* method = NULL;
+    if (compiler_driver_->IsImage()) {
+      ClassLinker* linker = Runtime::Current()->GetClassLinker();
+      mirror::DexCache* dex_cache = linker->FindDexCache(dex_file);
+      // Unchecked as we hold mutator_lock_ on entry.
+      ScopedObjectAccessUnchecked soa(Thread::Current());
+      method = linker->ResolveMethod(dex_file, method_idx, dex_cache, NULL, NULL, invoke_type);
+      CHECK(method != NULL);
+    }
+    const CompiledMethod* compiled_method =
+      compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
+    if (compiled_method != NULL) {
+      uint32_t offset = FixupCompiledCodeOffset(*elf_file.get(), oatdata_address, *compiled_method);
+      // Don't overwrite static method trampoline
+      if (method != NULL &&
+          (!method->IsStatic() ||
+           method->IsConstructor() ||
+           method->GetDeclaringClass()->IsInitialized())) {
+        method->SetOatCodeOffset(offset);
+      }
+    }
+    it.Next();
+  }
+  symbol_to_compiled_code_offset_.clear();
+}
+
+uint32_t ElfWriterMclinker::FixupCompiledCodeOffset(ElfFile& elf_file,
+                                                    llvm::ELF::Elf32_Addr oatdata_address,
+                                                    const CompiledCode& compiled_code) {
+  const std::string& symbol = compiled_code.GetSymbol();
+  SafeMap<const std::string*, uint32_t>::iterator it = symbol_to_compiled_code_offset_.find(&symbol);
+  if (it != symbol_to_compiled_code_offset_.end()) {
+    return it->second;
+  }
+
+  llvm::ELF::Elf32_Addr compiled_code_address = elf_file.FindSymbolAddress(llvm::ELF::SHT_SYMTAB,
+                                                                           symbol,
+                                                                           true);
+  CHECK_NE(0U, compiled_code_address) << symbol;
+  CHECK_LT(oatdata_address, compiled_code_address) << symbol;
+  uint32_t compiled_code_offset = compiled_code_address - oatdata_address;
+  symbol_to_compiled_code_offset_.Put(&symbol, compiled_code_offset);
+
+  const std::vector<uint32_t>& offsets = compiled_code.GetOatdataOffsetsToCompliledCodeOffset();
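+  // Patch each recorded oatdata location so it holds the offset of this compiled code.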
+  for (uint32_t i = 0; i < offsets.size(); i++) {
+    uint32_t oatdata_offset = oatdata_address + offsets[i];
+    uint32_t* addr = reinterpret_cast<uint32_t*>(elf_file.Begin() + oatdata_offset);
+    *addr = compiled_code_offset;
+  }
+  return compiled_code_offset;
+}
+#endif
+
+}  // namespace art
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
new file mode 100644
index 0000000..21f23e1
--- /dev/null
+++ b/compiler/elf_writer_mclinker.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ELF_WRITER_MCLINKER_H_
+#define ART_SRC_ELF_WRITER_MCLINKER_H_
+
+#include "elf_writer.h"
+
+#include "UniquePtr.h"
+#include "safe_map.h"
+
+namespace mcld {
+class IRBuilder;
+class Input;
+class LDSection;
+class LDSymbol;
+class Linker;
+class LinkerConfig;
+class Module;
+} // namespace mcld
+
+namespace art {
+
+class CompiledCode;
+
+class ElfWriterMclinker : public ElfWriter {
+ public:
+
+  // Write an ELF file. Returns true on success, false on failure.
+  static bool Create(File* file,
+                     std::vector<uint8_t>& oat_contents,
+                     const std::vector<const DexFile*>& dex_files,
+                     const std::string& android_root,
+                     bool is_host,
+                     const CompilerDriver& driver)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ protected:
+  virtual bool Write(std::vector<uint8_t>& oat_contents,
+                     const std::vector<const DexFile*>& dex_files,
+                     const std::string& android_root,
+                     bool is_host)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+  ElfWriterMclinker(const CompilerDriver& driver, File* elf_file);
+  ~ElfWriterMclinker();
+
+  void Init();
+  void AddOatInput(std::vector<uint8_t>& oat_contents);
+  void AddMethodInputs(const std::vector<const DexFile*>& dex_files);
+  void AddCompiledCodeInput(const CompiledCode& compiled_code);
+  void AddRuntimeInputs(const std::string& android_root, bool is_host);
+  bool Link();
+#if defined(ART_USE_PORTABLE_COMPILER)
+  void FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t FixupCompiledCodeOffset(ElfFile& elf_file,
+                                   llvm::ELF::Elf32_Addr oatdata_address,
+                                   const CompiledCode& compiled_code);
+#endif
+
+  // Setup by Init()
+  UniquePtr<mcld::LinkerConfig> linker_config_;
+  UniquePtr<mcld::Module> module_;
+  UniquePtr<mcld::IRBuilder> ir_builder_;
+  UniquePtr<mcld::Linker> linker_;
+
+  // Setup by AddOatInput()
+  // TODO: ownership of oat_input_?
+  mcld::Input* oat_input_;
+
+  // Setup by AddCompiledCodeInput
+  // set of symbols for already added mcld::Inputs
+  SafeMap<const std::string*, const std::string*> added_symbols_;
+
+  // Setup by FixupCompiledCodeOffset
+  // map of symbol names to oatdata offset
+  SafeMap<const std::string*, uint32_t> symbol_to_compiled_code_offset_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterMclinker);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ELF_WRITER_MCLINKER_H_
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
new file mode 100644
index 0000000..9de96d2
--- /dev/null
+++ b/compiler/elf_writer_quick.cc
@@ -0,0 +1,665 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_writer_quick.h"
+
+#include "base/logging.h"
+#include "base/unix_file/fd_file.h"
+#include "driver/compiler_driver.h"
+#include "globals.h"
+#include "oat.h"
+#include "utils.h"
+
+namespace art {
+
+ElfWriterQuick::ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
+  : ElfWriter(driver, elf_file) {}
+
+ElfWriterQuick::~ElfWriterQuick() {}
+
+bool ElfWriterQuick::Create(File* elf_file,
+                            std::vector<uint8_t>& oat_contents,
+                            const std::vector<const DexFile*>& dex_files,
+                            const std::string& android_root,
+                            bool is_host,
+                            const CompilerDriver& driver) {
+  ElfWriterQuick elf_writer(driver, elf_file);
+  return elf_writer.Write(oat_contents, dex_files, android_root, is_host);
+}
+
+bool ElfWriterQuick::Write(std::vector<uint8_t>& oat_contents,
+                           const std::vector<const DexFile*>& dex_files_unused,
+                           const std::string& android_root_unused,
+                           bool is_host_unused) {
+  const bool debug = false;
+  // +-------------------------+
+  // | Elf32_Ehdr              |
+  // +-------------------------+
+  // | Elf32_Phdr PHDR         |
+  // | Elf32_Phdr LOAD R       | .dynsym .dynstr .hash .rodata
+  // | Elf32_Phdr LOAD R X     | .text
+  // | Elf32_Phdr LOAD RW      | .dynamic
+  // | Elf32_Phdr DYNAMIC      | .dynamic
+  // +-------------------------+
+  // | .dynsym                 |
+  // | Elf32_Sym  STN_UNDEF    |
+  // | Elf32_Sym  oatdata      |
+  // | Elf32_Sym  oatexec      |
+  // | Elf32_Sym  oatlastword  |
+  // +-------------------------+
+  // | .dynstr                 |
+  // | \0                      |
+  // | oatdata\0               |
+  // | oatexec\0               |
+  // | oatlastword\0           |
+  // | boot.oat\0              |
+  // +-------------------------+
+  // | .hash                   |
+  // | Elf32_Word nbucket = 1  |
+  // | Elf32_Word nchain  = 3  |
+  // | Elf32_Word bucket[0] = 0|
+  // | Elf32_Word chain[0]  = 1|
+  // | Elf32_Word chain[1]  = 2|
+  // | Elf32_Word chain[2]  = 3|
+  // +-------------------------+
+  // | .rodata                 |
+  // | oatdata..oatexec-4      |
+  // +-------------------------+
+  // | .text                   |
+  // | oatexec..oatlastword    |
+  // +-------------------------+
+  // | .dynamic                |
+  // | Elf32_Dyn DT_SONAME     |
+  // | Elf32_Dyn DT_HASH       |
+  // | Elf32_Dyn DT_SYMTAB     |
+  // | Elf32_Dyn DT_SYMENT     |
+  // | Elf32_Dyn DT_STRTAB     |
+  // | Elf32_Dyn DT_STRSZ      |
+  // | Elf32_Dyn DT_NULL       |
+  // +-------------------------+
+  // | .shstrtab               |
+  // | \0                      |
+  // | .dynamic\0              |
+  // | .dynsym\0               |
+  // | .dynstr\0               |
+  // | .hash\0                 |
+  // | .rodata\0               |
+  // | .text\0                 |
+  // | .shstrtab\0             |
+  // +-------------------------+
+  // | Elf32_Shdr NULL         |
+  // | Elf32_Shdr .dynsym      |
+  // | Elf32_Shdr .dynstr      |
+  // | Elf32_Shdr .hash        |
+  // | Elf32_Shdr .text        |
+  // | Elf32_Shdr .rodata      |
+  // | Elf32_Shdr .dynamic     |
+  // | Elf32_Shdr .shstrtab    |
+  // +-------------------------+
+
+  // phase 1: computing offsets
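+  // File offsets double as (unrelocated) load addresses: every p_vaddr/sh_addr below equals its file offset.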
+  uint32_t expected_offset = 0;
+
+  // Elf32_Ehdr
+  expected_offset += sizeof(llvm::ELF::Elf32_Ehdr);
+
+  // PHDR
+  uint32_t phdr_alignment = sizeof(llvm::ELF::Elf32_Word);
+  uint32_t phdr_offset = expected_offset;
+  const uint8_t PH_PHDR     = 0;
+  const uint8_t PH_LOAD_R__ = 1;
+  const uint8_t PH_LOAD_R_X = 2;
+  const uint8_t PH_LOAD_RW_ = 3;
+  const uint8_t PH_DYNAMIC  = 4;
+  const uint8_t PH_NUM      = 5;
+  uint32_t phdr_size = sizeof(llvm::ELF::Elf32_Phdr) * PH_NUM;
+  expected_offset += phdr_size;
+  if (debug) {
+    LOG(INFO) << "phdr_offset=" << phdr_offset << std::hex << " " << phdr_offset;
+    LOG(INFO) << "phdr_size=" << phdr_size << std::hex << " " << phdr_size;
+  }
+
+  // .dynsym
+  uint32_t dynsym_alignment = sizeof(llvm::ELF::Elf32_Word);
+  uint32_t dynsym_offset = expected_offset = RoundUp(expected_offset, dynsym_alignment);
+  const uint8_t SYM_UNDEF       = 0; // aka STN_UNDEF
+  const uint8_t SYM_OATDATA     = 1;
+  const uint8_t SYM_OATEXEC     = 2;
+  const uint8_t SYM_OATLASTWORD = 3;
+  const uint8_t SYM_NUM         = 4;
+  uint32_t dynsym_size = sizeof(llvm::ELF::Elf32_Sym) * SYM_NUM;
+  expected_offset += dynsym_size;
+  if (debug) {
+    LOG(INFO) << "dynsym_offset=" << dynsym_offset << std::hex << " " << dynsym_offset;
+    LOG(INFO) << "dynsym_size=" << dynsym_size << std::hex << " " << dynsym_size;
+  }
+
+  // .dynstr
+  uint32_t dynstr_alignment = 1;
+  uint32_t dynstr_offset = expected_offset = RoundUp(expected_offset, dynstr_alignment);
+  std::string dynstr;
+  dynstr += '\0';
+  uint32_t dynstr_oatdata_offset = dynstr.size();
+  dynstr += "oatdata";
+  dynstr += '\0';
+  uint32_t dynstr_oatexec_offset = dynstr.size();
+  dynstr += "oatexec";
+  dynstr += '\0';
+  uint32_t dynstr_oatlastword_offset = dynstr.size();
+  dynstr += "oatlastword";
+  dynstr += '\0';
+  uint32_t dynstr_soname_offset = dynstr.size();
+  std::string file_name(elf_file_->GetPath());
+  size_t directory_separator_pos = file_name.rfind('/');
+  if (directory_separator_pos != std::string::npos) {
+    file_name = file_name.substr(directory_separator_pos + 1);
+  }
+  dynstr += file_name;
+  dynstr += '\0';
+  uint32_t dynstr_size = dynstr.size();
+  expected_offset += dynstr_size;
+  if (debug) {
+    LOG(INFO) << "dynstr_offset=" << dynstr_offset << std::hex << " " << dynstr_offset;
+    LOG(INFO) << "dynstr_size=" << dynstr_size << std::hex << " " << dynstr_size;
+  }
+
+  // .hash
+  uint32_t hash_alignment = sizeof(llvm::ELF::Elf32_Word);  // Even for 64-bit
+  uint32_t hash_offset = expected_offset = RoundUp(expected_offset, hash_alignment);
+  const uint8_t HASH_NBUCKET = 0;
+  const uint8_t HASH_NCHAIN  = 1;
+  const uint8_t HASH_BUCKET0 = 2;
+  const uint8_t HASH_NUM     = HASH_BUCKET0 + 1 + SYM_NUM;
+  uint32_t hash_size = sizeof(llvm::ELF::Elf32_Word) * HASH_NUM;
+  expected_offset += hash_size;
+  if (debug) {
+    LOG(INFO) << "hash_offset=" << hash_offset << std::hex << " " << hash_offset;
+    LOG(INFO) << "hash_size=" << hash_size << std::hex << " " << hash_size;
+  }
+
+  // .rodata
+  uint32_t oat_data_alignment = kPageSize;
+  uint32_t oat_data_offset = expected_offset = RoundUp(expected_offset, oat_data_alignment);
+  const OatHeader* oat_header = reinterpret_cast<OatHeader*>(&oat_contents[0]);
+  CHECK(oat_header->IsValid());
+  uint32_t oat_data_size = oat_header->GetExecutableOffset();
+  expected_offset += oat_data_size;
+  if (debug) {
+    LOG(INFO) << "oat_data_offset=" << oat_data_offset << std::hex << " " << oat_data_offset;
+    LOG(INFO) << "oat_data_size=" << oat_data_size << std::hex << " " << oat_data_size;
+  }
+
+  // .text
+  uint32_t oat_exec_alignment = kPageSize;
+  CHECK_ALIGNED(expected_offset, kPageSize);
+  uint32_t oat_exec_offset = expected_offset = RoundUp(expected_offset, oat_exec_alignment);
+  uint32_t oat_exec_size = oat_contents.size() - oat_data_size;
+  expected_offset += oat_exec_size;
+  CHECK_EQ(oat_data_offset + oat_contents.size(), expected_offset);
+  if (debug) {
+    LOG(INFO) << "oat_exec_offset=" << oat_exec_offset << std::hex << " " << oat_exec_offset;
+    LOG(INFO) << "oat_exec_size=" << oat_exec_size << std::hex << " " << oat_exec_size;
+  }
+
+  // .dynamic
+  // alignment would naturally be sizeof(llvm::ELF::Elf32_Word), but we want this in a new segment
+  uint32_t dynamic_alignment = kPageSize;
+  uint32_t dynamic_offset = expected_offset = RoundUp(expected_offset, dynamic_alignment);
+  const uint8_t DH_SONAME = 0;
+  const uint8_t DH_HASH   = 1;
+  const uint8_t DH_SYMTAB = 2;
+  const uint8_t DH_SYMENT = 3;
+  const uint8_t DH_STRTAB = 4;
+  const uint8_t DH_STRSZ  = 5;
+  const uint8_t DH_NULL   = 6;
+  const uint8_t DH_NUM    = 7;
+  uint32_t dynamic_size = sizeof(llvm::ELF::Elf32_Dyn) * DH_NUM;
+  expected_offset += dynamic_size;
+  if (debug) {
+    LOG(INFO) << "dynamic_offset=" << dynamic_offset << std::hex << " " << dynamic_offset;
+    LOG(INFO) << "dynamic_size=" << dynamic_size << std::hex << " " << dynamic_size;
+  }
+
+  // .shstrtab
+  uint32_t shstrtab_alignment = 1;
+  uint32_t shstrtab_offset = expected_offset = RoundUp(expected_offset, shstrtab_alignment);
+  std::string shstrtab;
+  shstrtab += '\0';
+  uint32_t shstrtab_dynamic_offset = shstrtab.size();
+  CHECK_EQ(1U, shstrtab_dynamic_offset);
+  shstrtab += ".dynamic";
+  shstrtab += '\0';
+  uint32_t shstrtab_dynsym_offset = shstrtab.size();
+  shstrtab += ".dynsym";
+  shstrtab += '\0';
+  uint32_t shstrtab_dynstr_offset = shstrtab.size();
+  shstrtab += ".dynstr";
+  shstrtab += '\0';
+  uint32_t shstrtab_hash_offset = shstrtab.size();
+  shstrtab += ".hash";
+  shstrtab += '\0';
+  uint32_t shstrtab_rodata_offset = shstrtab.size();
+  shstrtab += ".rodata";
+  shstrtab += '\0';
+  uint32_t shstrtab_text_offset = shstrtab.size();
+  shstrtab += ".text";
+  shstrtab += '\0';
+  uint32_t shstrtab_shstrtab_offset = shstrtab.size();
+  shstrtab += ".shstrtab";
+  shstrtab += '\0';
+  uint32_t shstrtab_size = shstrtab.size();
+  expected_offset += shstrtab_size;
+  if (debug) {
+    LOG(INFO) << "shstrtab_offset=" << shstrtab_offset << std::hex << " " << shstrtab_offset;
+    LOG(INFO) << "shstrtab_size=" << shstrtab_size << std::hex << " " << shstrtab_size;
+  }
+
+  // section headers (after all sections)
+  uint32_t shdr_alignment = sizeof(llvm::ELF::Elf32_Word);
+  uint32_t shdr_offset = expected_offset = RoundUp(expected_offset, shdr_alignment);
+  const uint8_t SH_NULL     = 0;
+  const uint8_t SH_DYNSYM   = 1;
+  const uint8_t SH_DYNSTR   = 2;
+  const uint8_t SH_HASH     = 3;
+  const uint8_t SH_RODATA   = 4;
+  const uint8_t SH_TEXT     = 5;
+  const uint8_t SH_DYNAMIC  = 6;
+  const uint8_t SH_SHSTRTAB = 7;
+  const uint8_t SH_NUM      = 8;
+  uint32_t shdr_size = sizeof(llvm::ELF::Elf32_Shdr) * SH_NUM;
+  expected_offset += shdr_size;
+  if (debug) {
+    LOG(INFO) << "shdr_offset=" << shdr_offset << std::hex << " " << shdr_offset;
+    LOG(INFO) << "shdr_size=" << shdr_size << std::hex << " " << shdr_size;
+  }
+
+  // phase 2: initializing data
+
+  // Elf32_Ehdr
+  llvm::ELF::Elf32_Ehdr elf_header;
+  memset(&elf_header, 0, sizeof(elf_header));
+  elf_header.e_ident[llvm::ELF::EI_MAG0]       = llvm::ELF::ElfMagic[0];
+  elf_header.e_ident[llvm::ELF::EI_MAG1]       = llvm::ELF::ElfMagic[1];
+  elf_header.e_ident[llvm::ELF::EI_MAG2]       = llvm::ELF::ElfMagic[2];
+  elf_header.e_ident[llvm::ELF::EI_MAG3]       = llvm::ELF::ElfMagic[3];
+  elf_header.e_ident[llvm::ELF::EI_CLASS]      = llvm::ELF::ELFCLASS32;
+  elf_header.e_ident[llvm::ELF::EI_DATA]       = llvm::ELF::ELFDATA2LSB;
+  elf_header.e_ident[llvm::ELF::EI_VERSION]    = llvm::ELF::EV_CURRENT;
+  elf_header.e_ident[llvm::ELF::EI_OSABI]      = llvm::ELF::ELFOSABI_LINUX;
+  elf_header.e_ident[llvm::ELF::EI_ABIVERSION] = 0;
+  elf_header.e_type = llvm::ELF::ET_DYN;
+  switch (compiler_driver_->GetInstructionSet()) {
+    case kThumb2: {
+      elf_header.e_machine = llvm::ELF::EM_ARM;
+      elf_header.e_flags = llvm::ELF::EF_ARM_EABI_VER5;
+      break;
+    }
+    case kX86: {
+      elf_header.e_machine = llvm::ELF::EM_386;
+      elf_header.e_flags = 0;
+      break;
+    }
+    case kMips: {
+      elf_header.e_machine = llvm::ELF::EM_MIPS;
+      elf_header.e_flags = (llvm::ELF::EF_MIPS_NOREORDER |
+                            llvm::ELF::EF_MIPS_PIC       |
+                            llvm::ELF::EF_MIPS_CPIC      |
+                            llvm::ELF::EF_MIPS_ABI_O32   |
+                            llvm::ELF::EF_MIPS_ARCH_32R2);
+      break;
+    }
+    case kArm:
+    default: {
+      LOG(FATAL) << "Unknown instruction set: " << compiler_driver_->GetInstructionSet();
+      break;
+    }
+  }
+  elf_header.e_version = 1;
+  elf_header.e_entry = 0;
+  elf_header.e_phoff = phdr_offset;
+  elf_header.e_shoff = shdr_offset;
+  elf_header.e_ehsize = sizeof(llvm::ELF::Elf32_Ehdr);
+  elf_header.e_phentsize = sizeof(llvm::ELF::Elf32_Phdr);
+  elf_header.e_phnum = PH_NUM;
+  elf_header.e_shentsize = sizeof(llvm::ELF::Elf32_Shdr);
+  elf_header.e_shnum = SH_NUM;
+  elf_header.e_shstrndx = SH_SHSTRTAB;
+
+  // PHDR
+  llvm::ELF::Elf32_Phdr program_headers[PH_NUM];
+  memset(&program_headers, 0, sizeof(program_headers));
+
+  program_headers[PH_PHDR].p_type    = llvm::ELF::PT_PHDR;
+  program_headers[PH_PHDR].p_offset  = phdr_offset;
+  program_headers[PH_PHDR].p_vaddr   = phdr_offset;
+  program_headers[PH_PHDR].p_paddr   = phdr_offset;
+  program_headers[PH_PHDR].p_filesz  = sizeof(program_headers);
+  program_headers[PH_PHDR].p_memsz   = sizeof(program_headers);
+  program_headers[PH_PHDR].p_flags   = llvm::ELF::PF_R;
+  program_headers[PH_PHDR].p_align   = phdr_alignment;
+
+  program_headers[PH_LOAD_R__].p_type    = llvm::ELF::PT_LOAD;
+  program_headers[PH_LOAD_R__].p_offset  = 0;
+  program_headers[PH_LOAD_R__].p_vaddr   = 0;
+  program_headers[PH_LOAD_R__].p_paddr   = 0;
+  program_headers[PH_LOAD_R__].p_filesz  = oat_data_offset + oat_data_size;
+  program_headers[PH_LOAD_R__].p_memsz   = oat_data_offset + oat_data_size;
+  program_headers[PH_LOAD_R__].p_flags   = llvm::ELF::PF_R;
+  program_headers[PH_LOAD_R__].p_align   = oat_data_alignment;
+
+  program_headers[PH_LOAD_R_X].p_type    = llvm::ELF::PT_LOAD;
+  program_headers[PH_LOAD_R_X].p_offset  = oat_exec_offset;
+  program_headers[PH_LOAD_R_X].p_vaddr   = oat_exec_offset;
+  program_headers[PH_LOAD_R_X].p_paddr   = oat_exec_offset;
+  program_headers[PH_LOAD_R_X].p_filesz  = oat_exec_size;
+  program_headers[PH_LOAD_R_X].p_memsz   = oat_exec_size;
+  program_headers[PH_LOAD_R_X].p_flags   = llvm::ELF::PF_R | llvm::ELF::PF_X;
+  program_headers[PH_LOAD_R_X].p_align   = oat_exec_alignment;
+
+  // TODO: PF_W for DYNAMIC is considered processor specific, do we need it?
+  program_headers[PH_LOAD_RW_].p_type    = llvm::ELF::PT_LOAD;
+  program_headers[PH_LOAD_RW_].p_offset  = dynamic_offset;
+  program_headers[PH_LOAD_RW_].p_vaddr   = dynamic_offset;
+  program_headers[PH_LOAD_RW_].p_paddr   = dynamic_offset;
+  program_headers[PH_LOAD_RW_].p_filesz  = dynamic_size;
+  program_headers[PH_LOAD_RW_].p_memsz   = dynamic_size;
+  program_headers[PH_LOAD_RW_].p_flags   = llvm::ELF::PF_R | llvm::ELF::PF_W;
+  program_headers[PH_LOAD_RW_].p_align   = dynamic_alignment;
+
+  // TODO: PF_W for DYNAMIC is considered processor specific, do we need it?
+  program_headers[PH_DYNAMIC].p_type    = llvm::ELF::PT_DYNAMIC;
+  program_headers[PH_DYNAMIC].p_offset  = dynamic_offset;
+  program_headers[PH_DYNAMIC].p_vaddr   = dynamic_offset;
+  program_headers[PH_DYNAMIC].p_paddr   = dynamic_offset;
+  program_headers[PH_DYNAMIC].p_filesz  = dynamic_size;
+  program_headers[PH_DYNAMIC].p_memsz   = dynamic_size;
+  program_headers[PH_DYNAMIC].p_flags   = llvm::ELF::PF_R | llvm::ELF::PF_W;
+  program_headers[PH_DYNAMIC].p_align   = dynamic_alignment;
+
+  // .dynsym
+  llvm::ELF::Elf32_Sym dynsym[SYM_NUM];
+  memset(&dynsym, 0, sizeof(dynsym));
+
+  dynsym[SYM_UNDEF].st_name  = 0;
+  dynsym[SYM_UNDEF].st_value = 0;
+  dynsym[SYM_UNDEF].st_size  = 0;
+  dynsym[SYM_UNDEF].st_info  = 0;
+  dynsym[SYM_UNDEF].st_other = 0;
+  dynsym[SYM_UNDEF].st_shndx = 0;
+
+  dynsym[SYM_OATDATA].st_name  = dynstr_oatdata_offset;
+  dynsym[SYM_OATDATA].st_value = oat_data_offset;
+  dynsym[SYM_OATDATA].st_size  = oat_data_size;
+  dynsym[SYM_OATDATA].setBindingAndType(llvm::ELF::STB_GLOBAL, llvm::ELF::STT_OBJECT);
+  dynsym[SYM_OATDATA].st_other = llvm::ELF::STV_DEFAULT;
+  dynsym[SYM_OATDATA].st_shndx = SH_RODATA;
+
+  dynsym[SYM_OATEXEC].st_name  = dynstr_oatexec_offset;
+  dynsym[SYM_OATEXEC].st_value = oat_exec_offset;
+  dynsym[SYM_OATEXEC].st_size  = oat_exec_size;
+  dynsym[SYM_OATEXEC].setBindingAndType(llvm::ELF::STB_GLOBAL, llvm::ELF::STT_OBJECT);
+  dynsym[SYM_OATEXEC].st_other = llvm::ELF::STV_DEFAULT;
+  dynsym[SYM_OATEXEC].st_shndx = SH_TEXT;
+
+  dynsym[SYM_OATLASTWORD].st_name  = dynstr_oatlastword_offset;
+  dynsym[SYM_OATLASTWORD].st_value = oat_exec_offset + oat_exec_size - 4;
+  dynsym[SYM_OATLASTWORD].st_size  = 4;
+  dynsym[SYM_OATLASTWORD].setBindingAndType(llvm::ELF::STB_GLOBAL, llvm::ELF::STT_OBJECT);
+  dynsym[SYM_OATLASTWORD].st_other = llvm::ELF::STV_DEFAULT;
+  dynsym[SYM_OATLASTWORD].st_shndx = SH_TEXT;
+
+  // .dynstr initialized above as dynstr
+
+  // .hash
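+  // Minimal SysV hash table: a single bucket pointing at oatdata, with the chain
+  // walking oatdata -> oatexec -> oatlastword and ending at STN_UNDEF.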
+  llvm::ELF::Elf32_Word hash[HASH_NUM];  // Note this is Elf32_Word even on 64-bit
+  hash[HASH_NBUCKET] = 1;
+  hash[HASH_NCHAIN]  = SYM_NUM;
+  hash[HASH_BUCKET0] = SYM_OATDATA;
+  hash[HASH_BUCKET0 + 1 + SYM_UNDEF]       = SYM_UNDEF;
+  hash[HASH_BUCKET0 + 1 + SYM_OATDATA]     = SYM_OATEXEC;
+  hash[HASH_BUCKET0 + 1 + SYM_OATEXEC]     = SYM_OATLASTWORD;
+  hash[HASH_BUCKET0 + 1 + SYM_OATLASTWORD] = SYM_UNDEF;
+
+  // .rodata and .text content come from oat_contents
+
+  // .dynamic
+  llvm::ELF::Elf32_Dyn dynamic_headers[DH_NUM];
+  memset(&dynamic_headers, 0, sizeof(dynamic_headers));
+
+  dynamic_headers[DH_SONAME].d_tag = llvm::ELF::DT_SONAME;
+  dynamic_headers[DH_SONAME].d_un.d_val = dynstr_soname_offset;
+
+  dynamic_headers[DH_HASH].d_tag = llvm::ELF::DT_HASH;
+  dynamic_headers[DH_HASH].d_un.d_ptr = hash_offset;
+
+  dynamic_headers[DH_SYMTAB].d_tag = llvm::ELF::DT_SYMTAB;
+  dynamic_headers[DH_SYMTAB].d_un.d_ptr = dynsym_offset;
+
+  dynamic_headers[DH_SYMENT].d_tag = llvm::ELF::DT_SYMENT;
+  dynamic_headers[DH_SYMENT].d_un.d_val = sizeof(llvm::ELF::Elf32_Sym);
+
+  dynamic_headers[DH_STRTAB].d_tag = llvm::ELF::DT_STRTAB;
+  dynamic_headers[DH_STRTAB].d_un.d_ptr = dynstr_offset;
+
+  dynamic_headers[DH_STRSZ].d_tag = llvm::ELF::DT_STRSZ;
+  dynamic_headers[DH_STRSZ].d_un.d_val = dynstr_size;
+
+  dynamic_headers[DH_NULL].d_tag = llvm::ELF::DT_NULL;
+  dynamic_headers[DH_NULL].d_un.d_val = 0;
+
+  // .shstrtab initialized above as shstrtab
+
+  // section headers (after all sections)
+  llvm::ELF::Elf32_Shdr section_headers[SH_NUM];
+  memset(&section_headers, 0, sizeof(section_headers));
+
+  section_headers[SH_NULL].sh_name      = 0;
+  section_headers[SH_NULL].sh_type      = llvm::ELF::SHT_NULL;
+  section_headers[SH_NULL].sh_flags     = 0;
+  section_headers[SH_NULL].sh_addr      = 0;
+  section_headers[SH_NULL].sh_offset    = 0;
+  section_headers[SH_NULL].sh_size      = 0;
+  section_headers[SH_NULL].sh_link      = 0;
+  section_headers[SH_NULL].sh_info      = 0;
+  section_headers[SH_NULL].sh_addralign = 0;
+  section_headers[SH_NULL].sh_entsize   = 0;
+
+  section_headers[SH_DYNSYM].sh_name      = shstrtab_dynsym_offset;
+  section_headers[SH_DYNSYM].sh_type      = llvm::ELF::SHT_DYNSYM;
+  section_headers[SH_DYNSYM].sh_flags     = llvm::ELF::SHF_ALLOC;
+  section_headers[SH_DYNSYM].sh_addr      = dynsym_offset;
+  section_headers[SH_DYNSYM].sh_offset    = dynsym_offset;
+  section_headers[SH_DYNSYM].sh_size      = dynsym_size;
+  section_headers[SH_DYNSYM].sh_link      = SH_DYNSTR;
+  section_headers[SH_DYNSYM].sh_info      = 1;  // 1 because we have no STB_LOCAL symbols
+  section_headers[SH_DYNSYM].sh_addralign = dynsym_alignment;
+  section_headers[SH_DYNSYM].sh_entsize   = sizeof(llvm::ELF::Elf32_Sym);
+
+  section_headers[SH_DYNSTR].sh_name      = shstrtab_dynstr_offset;
+  section_headers[SH_DYNSTR].sh_type      = llvm::ELF::SHT_STRTAB;
+  section_headers[SH_DYNSTR].sh_flags     = llvm::ELF::SHF_ALLOC;
+  section_headers[SH_DYNSTR].sh_addr      = dynstr_offset;
+  section_headers[SH_DYNSTR].sh_offset    = dynstr_offset;
+  section_headers[SH_DYNSTR].sh_size      = dynstr_size;
+  section_headers[SH_DYNSTR].sh_link      = 0;
+  section_headers[SH_DYNSTR].sh_info      = 0;
+  section_headers[SH_DYNSTR].sh_addralign = dynstr_alignment;
+  section_headers[SH_DYNSTR].sh_entsize   = 0;
+
+  section_headers[SH_HASH].sh_name      = shstrtab_hash_offset;
+  section_headers[SH_HASH].sh_type      = llvm::ELF::SHT_HASH;
+  section_headers[SH_HASH].sh_flags     = llvm::ELF::SHF_ALLOC;
+  section_headers[SH_HASH].sh_addr      = hash_offset;
+  section_headers[SH_HASH].sh_offset    = hash_offset;
+  section_headers[SH_HASH].sh_size      = hash_size;
+  section_headers[SH_HASH].sh_link      = SH_DYNSYM;
+  section_headers[SH_HASH].sh_info      = 0;
+  section_headers[SH_HASH].sh_addralign = hash_alignment;
+  section_headers[SH_HASH].sh_entsize   = sizeof(llvm::ELF::Elf32_Word); // This is Elf32_Word even on 64-bit
+
+  section_headers[SH_RODATA].sh_name      = shstrtab_rodata_offset;
+  section_headers[SH_RODATA].sh_type      = llvm::ELF::SHT_PROGBITS;
+  section_headers[SH_RODATA].sh_flags     = llvm::ELF::SHF_ALLOC;
+  section_headers[SH_RODATA].sh_addr      = oat_data_offset;
+  section_headers[SH_RODATA].sh_offset    = oat_data_offset;
+  section_headers[SH_RODATA].sh_size      = oat_data_size;
+  section_headers[SH_RODATA].sh_link      = 0;
+  section_headers[SH_RODATA].sh_info      = 0;
+  section_headers[SH_RODATA].sh_addralign = oat_data_alignment;
+  section_headers[SH_RODATA].sh_entsize   = 0;
+
+  section_headers[SH_TEXT].sh_name      = shstrtab_text_offset;
+  section_headers[SH_TEXT].sh_type      = llvm::ELF::SHT_PROGBITS;
+  section_headers[SH_TEXT].sh_flags     = llvm::ELF::SHF_ALLOC | llvm::ELF::SHF_EXECINSTR;
+  section_headers[SH_TEXT].sh_addr      = oat_exec_offset;
+  section_headers[SH_TEXT].sh_offset    = oat_exec_offset;
+  section_headers[SH_TEXT].sh_size      = oat_exec_size;
+  section_headers[SH_TEXT].sh_link      = 0;
+  section_headers[SH_TEXT].sh_info      = 0;
+  section_headers[SH_TEXT].sh_addralign = oat_exec_alignment;
+  section_headers[SH_TEXT].sh_entsize   = 0;
+
+  // TODO: SHF_WRITE for .dynamic is considered processor specific, do we need it?
+  section_headers[SH_DYNAMIC].sh_name      = shstrtab_dynamic_offset;
+  section_headers[SH_DYNAMIC].sh_type      = llvm::ELF::SHT_DYNAMIC;
+  section_headers[SH_DYNAMIC].sh_flags     = llvm::ELF::SHF_WRITE | llvm::ELF::SHF_ALLOC;
+  section_headers[SH_DYNAMIC].sh_addr      = dynamic_offset;
+  section_headers[SH_DYNAMIC].sh_offset    = dynamic_offset;
+  section_headers[SH_DYNAMIC].sh_size      = dynamic_size;
+  section_headers[SH_DYNAMIC].sh_link      = SH_DYNSTR;
+  section_headers[SH_DYNAMIC].sh_info      = 0;
+  section_headers[SH_DYNAMIC].sh_addralign = dynamic_alignment;
+  section_headers[SH_DYNAMIC].sh_entsize   = sizeof(llvm::ELF::Elf32_Dyn);
+
+  section_headers[SH_SHSTRTAB].sh_name      = shstrtab_shstrtab_offset;
+  section_headers[SH_SHSTRTAB].sh_type      = llvm::ELF::SHT_STRTAB;
+  section_headers[SH_SHSTRTAB].sh_flags     = 0;
+  section_headers[SH_SHSTRTAB].sh_addr      = shstrtab_offset;
+  section_headers[SH_SHSTRTAB].sh_offset    = shstrtab_offset;
+  section_headers[SH_SHSTRTAB].sh_size      = shstrtab_size;
+  section_headers[SH_SHSTRTAB].sh_link      = 0;
+  section_headers[SH_SHSTRTAB].sh_info      = 0;
+  section_headers[SH_SHSTRTAB].sh_addralign = shstrtab_alignment;
+  section_headers[SH_SHSTRTAB].sh_entsize   = 0;
+
+  // phase 3: writing file
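+  // Sections are written in increasing file-offset order; the DCHECK_LE checks
+  // below verify that the offsets computed in phase 1 leave no overlaps.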
+
+  // Elf32_Ehdr
+  if (!elf_file_->WriteFully(&elf_header, sizeof(elf_header))) {
+    PLOG(ERROR) << "Failed to write ELF header for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // PHDR
+  if (static_cast<off_t>(phdr_offset) != lseek(elf_file_->Fd(), 0, SEEK_CUR)) {
+    PLOG(ERROR) << "Failed to be at expected ELF program header offset phdr_offset "
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(program_headers, sizeof(program_headers))) {
+    PLOG(ERROR) << "Failed to write ELF program headers for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // .dynsym
+  DCHECK_LE(phdr_offset + phdr_size, dynsym_offset);
+  if (static_cast<off_t>(dynsym_offset) != lseek(elf_file_->Fd(), dynsym_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to .dynsym offset location " << dynsym_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(dynsym, sizeof(dynsym))) {
+    PLOG(ERROR) << "Failed to write .dynsym for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // .dynstr
+  DCHECK_LE(dynsym_offset + dynsym_size, dynstr_offset);
+  if (static_cast<off_t>(dynstr_offset) != lseek(elf_file_->Fd(), dynstr_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to .dynstr offset " << dynstr_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(&dynstr[0], dynstr_size)) {
+    PLOG(ERROR) << "Failed to write .dynstr for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // .hash
+  DCHECK_LE(dynstr_offset + dynstr_size, hash_offset);
+  if (static_cast<off_t>(hash_offset) != lseek(elf_file_->Fd(), hash_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to .hash offset " << hash_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(hash, sizeof(hash))) {
+    PLOG(ERROR) << "Failed to write .dynsym for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // .rodata .text
+  DCHECK_LE(hash_offset + hash_size, oat_data_offset);
+  if (static_cast<off_t>(oat_data_offset) != lseek(elf_file_->Fd(), oat_data_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to .rodata offset " << oat_data_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(&oat_contents[0], oat_contents.size())) {
+    PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // .dynamic
+  DCHECK_LE(oat_data_offset + oat_contents.size(), dynamic_offset);
+  if (static_cast<off_t>(dynamic_offset) != lseek(elf_file_->Fd(), dynamic_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to .dynamic offset " << dynamic_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(&dynamic_headers[0], dynamic_size)) {
+    PLOG(ERROR) << "Failed to write .dynamic for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // .shstrtab
+  DCHECK_LE(dynamic_offset + dynamic_size, shstrtab_offset);
+  if (static_cast<off_t>(shstrtab_offset) != lseek(elf_file_->Fd(), shstrtab_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to .shstrtab offset " << shstrtab_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(&shstrtab[0], shstrtab_size)) {
+    PLOG(ERROR) << "Failed to write .shstrtab for " << elf_file_->GetPath();
+    return false;
+  }
+
+  // section headers (after all sections)
+  DCHECK_LE(shstrtab_offset + shstrtab_size, shdr_offset);
+  if (static_cast<off_t>(shdr_offset) != lseek(elf_file_->Fd(), shdr_offset, SEEK_SET)) {
+    PLOG(ERROR) << "Failed to seek to ELF section headers offset " << shdr_offset
+                << " for " << elf_file_->GetPath();
+    return false;
+  }
+  if (!elf_file_->WriteFully(section_headers, sizeof(section_headers))) {
+    PLOG(ERROR) << "Failed to write ELF section headers for " << elf_file_->GetPath();
+    return false;
+  }
+
+  LOG(INFO) << "ELF file written successfully: " << elf_file_->GetPath();
+  return true;
+}
+
+}  // namespace art
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
new file mode 100644
index 0000000..a1a386b
--- /dev/null
+++ b/compiler/elf_writer_quick.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ELF_WRITER_QUICK_H_
+#define ART_SRC_ELF_WRITER_QUICK_H_
+
+#include "elf_writer.h"
+
+namespace art {
+
+class ElfWriterQuick : public ElfWriter {
+ public:
+  // Write an ELF file. Returns true on success, false on failure.
+  static bool Create(File* file,
+                     std::vector<uint8_t>& oat_contents,
+                     const std::vector<const DexFile*>& dex_files,
+                     const std::string& android_root,
+                     bool is_host,
+                     const CompilerDriver& driver)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ protected:
+  virtual bool Write(std::vector<uint8_t>& oat_contents,
+                     const std::vector<const DexFile*>& dex_files,
+                     const std::string& android_root,
+                     bool is_host)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+  ElfWriterQuick(const CompilerDriver& driver, File* elf_file);
+  ~ElfWriterQuick();
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ELF_WRITER_QUICK_H_
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
new file mode 100644
index 0000000..d4486d2
--- /dev/null
+++ b/compiler/elf_writer_test.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_test.h"
+
+#include "oat.h"
+#include "elf_file.h"
+
+namespace art {
+
+class ElfWriterTest : public CommonTest {
+
+ protected:
+  virtual void SetUp() {
+    ReserveImageSpace();
+    CommonTest::SetUp();
+  }
+};
+
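+// Verifies that both ElfFile lookup paths (symbol table walk and dynamic symbol
+// lookup) agree with the address that dlsym() returned for the same symbol.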
+#define EXPECT_ELF_FILE_ADDRESS(ef, value, name, build_map) \
+  EXPECT_EQ(value, reinterpret_cast<void*>(ef->FindSymbolAddress(::llvm::ELF::SHT_DYNSYM, name, build_map))); \
+  EXPECT_EQ(value, ef->FindDynamicSymbolAddress(name)); \
+
+TEST_F(ElfWriterTest, dlsym) {
+  std::string elf_filename;
+  if (IsHost()) {
+    const char* host_dir = getenv("ANDROID_HOST_OUT");
+    CHECK(host_dir != NULL);
+    elf_filename = StringPrintf("%s/framework/core.oat", host_dir);
+  } else {
+    elf_filename = "/data/art-test/core.oat";
+  }
+  LOG(INFO) << "elf_filename=" << elf_filename;
+
+  UnreserveImageSpace();
+  void* dl_oat_so = dlopen(elf_filename.c_str(), RTLD_NOW);
+  ASSERT_TRUE(dl_oat_so != NULL) << dlerror();
+  void* dl_oatdata = dlsym(dl_oat_so, "oatdata");
+  ASSERT_TRUE(dl_oatdata != NULL);
+
+  OatHeader* dl_oat_header = reinterpret_cast<OatHeader*>(dl_oatdata);
+  ASSERT_TRUE(dl_oat_header->IsValid());
+  void* dl_oatexec = dlsym(dl_oat_so, "oatexec");
+  ASSERT_TRUE(dl_oatexec != NULL);
+  ASSERT_LT(dl_oatdata, dl_oatexec);
+
+  void* dl_oatlastword = dlsym(dl_oat_so, "oatlastword");
+  ASSERT_TRUE(dl_oatlastword != NULL);
+  ASSERT_LT(dl_oatexec, dl_oatlastword);
+
+  ASSERT_EQ(0, dlclose(dl_oat_so));
+
+  UniquePtr<File> file(OS::OpenFile(elf_filename.c_str(), false));
+  ASSERT_TRUE(file.get() != NULL);
+  {
+    UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, false));
+    CHECK(ef.get() != NULL);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", false);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", false);
+  }
+  {
+    UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, false));
+    CHECK(ef.get() != NULL);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", true);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", true);
+  }
+  {
+    UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, true));
+    CHECK(ef.get() != NULL);
+    ef->Load();
+    EXPECT_EQ(dl_oatdata, ef->FindDynamicSymbolAddress("oatdata"));
+    EXPECT_EQ(dl_oatexec, ef->FindDynamicSymbolAddress("oatexec"));
+    EXPECT_EQ(dl_oatlastword, ef->FindDynamicSymbolAddress("oatlastword"));
+  }
+}
+
+}  // namespace art
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
new file mode 100644
index 0000000..8d32a91
--- /dev/null
+++ b/compiler/image_writer.cc
@@ -0,0 +1,680 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "image_writer.h"
+
+#include <sys/stat.h>
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
+#include "elf_writer.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/heap.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "globals.h"
+#include "image.h"
+#include "intern_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "oat.h"
+#include "oat_file.h"
+#include "object_utils.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+#include "UniquePtr.h"
+#include "utils.h"
+
+using namespace art::mirror;
+
+namespace art {
+
+bool ImageWriter::Write(const std::string& image_filename,
+                        uintptr_t image_begin,
+                        const std::string& oat_filename,
+                        const std::string& oat_location) {
+  CHECK(!image_filename.empty());
+
+  CHECK_NE(image_begin, 0U);
+  image_begin_ = reinterpret_cast<byte*>(image_begin);
+
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  const std::vector<DexCache*>& all_dex_caches = class_linker->GetDexCaches();
+  for (size_t i = 0; i < all_dex_caches.size(); i++) {
+    DexCache* dex_cache = all_dex_caches[i];
+    dex_caches_.insert(dex_cache);
+  }
+
+  UniquePtr<File> oat_file(OS::OpenFile(oat_filename.c_str(), true, false));
+  if (oat_file.get() == NULL) {
+    LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
+    return false;
+  }
+  oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location);
+  class_linker->RegisterOatFile(*oat_file_);
+  interpreter_to_interpreter_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToInterpreterEntryOffset();
+  interpreter_to_quick_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToQuickEntryOffset();
+  portable_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
+  quick_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
+
+  {
+    Thread::Current()->TransitionFromSuspendedToRunnable();
+    PruneNonImageClasses();  // Remove junk
+    ComputeLazyFieldsForImageClasses();  // Add useful information
+    ComputeEagerResolvedStrings();
+    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+  }
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  heap->CollectGarbage(false);  // Remove garbage.
+  // Trim size of alloc spaces.
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
+  // TODO: C++0x auto
+  typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    gc::space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      space->AsDlMallocSpace()->Trim();
+    }
+  }
+
+  if (!AllocMemory()) {
+    return false;
+  }
+#ifndef NDEBUG
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    CheckNonImageClassesRemoved();
+  }
+#endif
+  Thread::Current()->TransitionFromSuspendedToRunnable();
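+  // Query the oat file's loaded size and the offset of its data section so that
+  // the image layout and oat address fix-ups below can be computed.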
+  size_t oat_loaded_size = 0;
+  size_t oat_data_offset = 0;
+  ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset);
+  CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset);
+  CopyAndFixupObjects();
+  PatchOatCodeAndMethods();
+  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+
+  UniquePtr<File> image_file(OS::OpenFile(image_filename.c_str(), true));
+  if (image_file.get() == NULL) {
+    LOG(ERROR) << "Failed to open image file " << image_filename;
+    return false;
+  }
+  if (fchmod(image_file->Fd(), 0644) != 0) {
+    PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
+    return false;
+  }
+  bool success = image_file->WriteFully(image_->Begin(), image_end_);
+  if (!success) {
+    PLOG(ERROR) << "Failed to write image file " << image_filename;
+    return false;
+  }
+  return true;
+}
+
+bool ImageWriter::AllocMemory() {
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
+  size_t size = 0;
+  // TODO: C++0x auto
+  typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    gc::space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      size += space->Size();
+    }
+  }
+
+  int prot = PROT_READ | PROT_WRITE;
+  size_t length = RoundUp(size, kPageSize);
+  image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, prot));
+  if (image_.get() == NULL) {
+    LOG(ERROR) << "Failed to allocate memory for image file generation";
+    return false;
+  }
+  return true;
+}
+
+void ImageWriter::ComputeLazyFieldsForImageClasses() {
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+}
+
+bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
+  c->ComputeName();
+  return true;
+}
+
+void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
+  if (!obj->GetClass()->IsStringClass()) {
+    return;
+  }
+  String* string = obj->AsString();
+  const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
+  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
+  typedef Set::const_iterator CacheIt;  // TODO: C++0x auto
+  for (CacheIt it = writer->dex_caches_.begin(), end = writer->dex_caches_.end(); it != end; ++it) {
+    DexCache* dex_cache = *it;
+    const DexFile& dex_file = *dex_cache->GetDexFile();
+    const DexFile::StringId* string_id = dex_file.FindStringId(utf16_string);
+    if (string_id != NULL) {
+      // This string occurs in this dex file, assign the dex cache entry.
+      uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
+      if (dex_cache->GetResolvedString(string_idx) == NULL) {
+        dex_cache->SetResolvedString(string_idx, string);
+      }
+    }
+  }
+}
+
+void ImageWriter::ComputeEagerResolvedStrings()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // TODO: Check image spaces only?
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  heap->FlushAllocStack();
+  heap->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this);
+}
+
+bool ImageWriter::IsImageClass(const Class* klass) {
+  return compiler_driver_.IsImageClass(ClassHelper(klass).GetDescriptor());
+}
+
+struct NonImageClasses {
+  ImageWriter* image_writer;
+  std::set<std::string>* non_image_classes;
+};
+
+void ImageWriter::PruneNonImageClasses() {
+  if (compiler_driver_.GetImageClasses() == NULL) {
+    return;
+  }
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+
+  // Make a list of classes we would like to prune.
+  std::set<std::string> non_image_classes;
+  NonImageClasses context;
+  context.image_writer = this;
+  context.non_image_classes = &non_image_classes;
+  class_linker->VisitClasses(NonImageClassesVisitor, &context);
+
+  // Remove the undesired classes from the class roots.
+  typedef std::set<std::string>::const_iterator ClassIt;  // TODO: C++0x auto
+  for (ClassIt it = non_image_classes.begin(), end = non_image_classes.end(); it != end; ++it) {
+    class_linker->RemoveClass((*it).c_str(), NULL);
+  }
+
+  // Clear references to removed classes from the DexCaches.
+  AbstractMethod* resolution_method = runtime->GetResolutionMethod();
+  typedef Set::const_iterator CacheIt;  // TODO: C++0x auto
+  for (CacheIt it = dex_caches_.begin(), end = dex_caches_.end(); it != end; ++it) {
+    DexCache* dex_cache = *it;
+    for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
+      Class* klass = dex_cache->GetResolvedType(i);
+      if (klass != NULL && !IsImageClass(klass)) {
+        dex_cache->SetResolvedType(i, NULL);
+        dex_cache->GetInitializedStaticStorage()->Set(i, NULL);
+      }
+    }
+    for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
+      AbstractMethod* method = dex_cache->GetResolvedMethod(i);
+      if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+        dex_cache->SetResolvedMethod(i, resolution_method);
+      }
+    }
+    for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
+      Field* field = dex_cache->GetResolvedField(i);
+      if (field != NULL && !IsImageClass(field->GetDeclaringClass())) {
+        dex_cache->SetResolvedField(i, NULL);
+      }
+    }
+  }
+}
+
+bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
+  NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
+  if (!context->image_writer->IsImageClass(klass)) {
+    context->non_image_classes->insert(ClassHelper(klass).GetDescriptor());
+  }
+  return true;
+}
+
+void ImageWriter::CheckNonImageClassesRemoved()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (compiler_driver_.GetImageClasses() == NULL) {
+    return;
+  }
+
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  Thread* self = Thread::Current();
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    heap->FlushAllocStack();
+  }
+
+  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  heap->GetLiveBitmap()->Walk(CheckNonImageClassesRemovedCallback, this);
+}
+
+void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
+  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
+  if (!obj->IsClass()) {
+    return;
+  }
+  Class* klass = obj->AsClass();
+  if (!image_writer->IsImageClass(klass)) {
+    image_writer->DumpImageClasses();
+    CHECK(image_writer->IsImageClass(klass)) << ClassHelper(klass).GetDescriptor()
+                                             << " " << PrettyDescriptor(klass);
+  }
+}
+
+void ImageWriter::DumpImageClasses() {
+  CompilerDriver::DescriptorSet* image_classes = compiler_driver_.GetImageClasses();
+  CHECK(image_classes != NULL);
+  typedef std::set<std::string>::const_iterator It;  // TODO: C++0x auto
+  for (It it = image_classes->begin(), end = image_classes->end(); it != end; ++it) {
+    LOG(INFO) << " " << *it;
+  }
+}
+
+void ImageWriter::CalculateNewObjectOffsetsCallback(Object* obj, void* arg) {
+  DCHECK(obj != NULL);
+  DCHECK(arg != NULL);
+  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
+
+  // If it is a string, we want to intern it if it's not already interned.
+  if (obj->GetClass()->IsStringClass()) {
+    // we must be an interned string that was forward referenced and already assigned
+    if (image_writer->IsImageOffsetAssigned(obj)) {
+      DCHECK_EQ(obj, obj->AsString()->Intern());
+      return;
+    }
+    SirtRef<String> interned(Thread::Current(), obj->AsString()->Intern());
+    if (obj != interned.get()) {
+      if (!image_writer->IsImageOffsetAssigned(interned.get())) {
+        // interned obj is after us, allocate its location early
+        image_writer->AssignImageOffset(interned.get());
+      }
+      // point those looking for this object to the interned version.
+      image_writer->SetImageOffset(obj, image_writer->GetImageOffset(interned.get()));
+      return;
+    }
+    // else (obj == interned), nothing to do but fall through to the normal case
+  }
+
+  image_writer->AssignImageOffset(obj);
+}
+
+ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  Class* object_array_class = class_linker->FindSystemClass("[Ljava/lang/Object;");
+  Thread* self = Thread::Current();
+
+  // build an Object[] of all the DexCaches used in the source_space_
+  ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class,
+                                                               dex_caches_.size());
+  int i = 0;
+  typedef Set::const_iterator It;  // TODO: C++0x auto
+  for (It it = dex_caches_.begin(), end = dex_caches_.end(); it != end; ++it, ++i) {
+    dex_caches->Set(i, *it);
+  }
+
+  // build an Object[] of the roots needed to restore the runtime
+  SirtRef<ObjectArray<Object> >
+      image_roots(self,
+                  ObjectArray<Object>::Alloc(self, object_array_class,
+                                             ImageHeader::kImageRootsMax));
+  image_roots->Set(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
+  image_roots->Set(ImageHeader::kCalleeSaveMethod,
+                   runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
+  image_roots->Set(ImageHeader::kRefsOnlySaveMethod,
+                   runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
+  image_roots->Set(ImageHeader::kRefsAndArgsSaveMethod,
+                   runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
+  image_roots->Set(ImageHeader::kOatLocation,
+                   String::AllocFromModifiedUtf8(self, oat_file_->GetLocation().c_str()));
+  image_roots->Set(ImageHeader::kDexCaches,
+                   dex_caches);
+  image_roots->Set(ImageHeader::kClassRoots,
+                   class_linker->GetClassRoots());
+  for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
+    CHECK(image_roots->Get(i) != NULL);
+  }
+  return image_roots.get();
+}
+
+void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
+  CHECK_NE(0U, oat_loaded_size);
+  Thread* self = Thread::Current();
+  SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
+
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
+  DCHECK(!spaces.empty());
+  DCHECK_EQ(0U, image_end_);
+
+  // leave space for the header, but do not write it yet, we need to
+  // know where image_roots is going to end up
+  image_end_ += RoundUp(sizeof(ImageHeader), 8); // 64-bit-alignment
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    heap->FlushAllocStack();
+    // TODO: Image spaces only?
+    // TODO: Add InOrderWalk to heap bitmap.
+    const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
+    DCHECK(heap->GetLargeObjectsSpace()->GetLiveObjects()->IsEmpty());
+    // TODO: C++0x auto
+    typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+      gc::space::ContinuousSpace* space = *it;
+      space->GetLiveBitmap()->InOrderWalk(CalculateNewObjectOffsetsCallback, this);
+      DCHECK_LT(image_end_, image_->Size());
+    }
+    self->EndAssertNoThreadSuspension(old);
+  }
+
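+  // The oat file is expected to be mapped page-aligned immediately after the image;
+  // record where its data section will live so GetOatAddress() can translate offsets.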
+  const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
+  const byte* oat_file_end = oat_file_begin + oat_loaded_size;
+  oat_data_begin_ = oat_file_begin + oat_data_offset;
+  const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();
+
+  // return to write header at start of image with future location of image_roots
+  ImageHeader image_header(reinterpret_cast<uint32_t>(image_begin_),
+                           reinterpret_cast<uint32_t>(GetImageAddress(image_roots.get())),
+                           oat_file_->GetOatHeader().GetChecksum(),
+                           reinterpret_cast<uint32_t>(oat_file_begin),
+                           reinterpret_cast<uint32_t>(oat_data_begin_),
+                           reinterpret_cast<uint32_t>(oat_data_end),
+                           reinterpret_cast<uint32_t>(oat_file_end));
+  memcpy(image_->Begin(), &image_header, sizeof(image_header));
+
+  // Note that image_end_ is left at end of used space
+}
+
+void ImageWriter::CopyAndFixupObjects()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Thread* self = Thread::Current();
+  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  // TODO: heap validation can't handle this fix up pass
+  heap->DisableObjectValidation();
+  // TODO: Image spaces only?
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  heap->FlushAllocStack();
+  heap->GetLiveBitmap()->Walk(CopyAndFixupObjectsCallback, this);
+  self->EndAssertNoThreadSuspension(old_cause);
+}
+
+void ImageWriter::CopyAndFixupObjectsCallback(Object* object, void* arg) {
+  DCHECK(object != NULL);
+  DCHECK(arg != NULL);
+  const Object* obj = object;
+  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
+
+  // see GetLocalAddress for similar computation
+  size_t offset = image_writer->GetImageOffset(obj);
+  byte* dst = image_writer->image_->Begin() + offset;
+  const byte* src = reinterpret_cast<const byte*>(obj);
+  size_t n = obj->SizeOf();
+  DCHECK_LT(offset + n, image_writer->image_->Size());
+  memcpy(dst, src, n);
+  Object* copy = reinterpret_cast<Object*>(dst);
+  copy->SetField32(Object::MonitorOffset(), 0, false); // We may have inflated the lock during compilation.
+  image_writer->FixupObject(obj, copy);
+}
+
+void ImageWriter::FixupObject(const Object* orig, Object* copy) {
+  DCHECK(orig != NULL);
+  DCHECK(copy != NULL);
+  copy->SetClass(down_cast<Class*>(GetImageAddress(orig->GetClass())));
+  // TODO: special case init of pointers to malloc data (or removal of these pointers)
+  if (orig->IsClass()) {
+    FixupClass(orig->AsClass(), down_cast<Class*>(copy));
+  } else if (orig->IsObjectArray()) {
+    FixupObjectArray(orig->AsObjectArray<Object>(), down_cast<ObjectArray<Object>*>(copy));
+  } else if (orig->IsMethod()) {
+    FixupMethod(orig->AsMethod(), down_cast<AbstractMethod*>(copy));
+  } else {
+    FixupInstanceFields(orig, copy);
+  }
+}
+
+void ImageWriter::FixupClass(const Class* orig, Class* copy) {
+  FixupInstanceFields(orig, copy);
+  FixupStaticFields(orig, copy);
+}
+
+void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) {
+  FixupInstanceFields(orig, copy);
+
+  // OatWriter replaces the code_ with an offset value.
+  // Here we readjust to a pointer relative to oat_begin_
+  if (orig->IsAbstract()) {
+    // Code for abstract methods is set to the abstract method error stub when we load the image.
+    copy->SetEntryPointFromCompiledCode(NULL);
+    copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+                                       (GetOatAddress(interpreter_to_interpreter_entry_offset_)));
+    return;
+  } else {
+    copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+                                       (GetOatAddress(interpreter_to_quick_entry_offset_)));
+  }
+
+  if (orig == Runtime::Current()->GetResolutionMethod()) {
+#if defined(ART_USE_PORTABLE_COMPILER)
+    copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+#else
+    copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+#endif
+    return;
+  }
+
+  // Use original code if it exists. Otherwise, set the code pointer to the resolution trampoline.
+  const byte* code = GetOatAddress(orig->GetOatCodeOffset());
+  if (code != NULL) {
+    copy->SetEntryPointFromCompiledCode(code);
+  } else {
+#if defined(ART_USE_PORTABLE_COMPILER)
+    copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+#else
+    copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+#endif
+  }
+
+  if (orig->IsNative()) {
+    // The native method's pointer is set to a stub to lookup via dlsym when we load the image.
+    // Note this is not the code_ pointer, that is handled above.
+    copy->SetNativeMethod(NULL);
+  } else {
+    // normal (non-abstract non-native) methods have mapping tables to relocate
+    uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
+    const byte* mapping_table = GetOatAddress(mapping_table_off);
+    copy->SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table));
+
+    uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
+    const byte* vmap_table = GetOatAddress(vmap_table_offset);
+    copy->SetVmapTable(reinterpret_cast<const uint16_t*>(vmap_table));
+
+    uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
+    const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
+    copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
+  }
+}
+
+void ImageWriter::FixupObjectArray(const ObjectArray<Object>* orig, ObjectArray<Object>* copy) {
+  for (int32_t i = 0; i < orig->GetLength(); ++i) {
+    const Object* element = orig->Get(i);
+    copy->SetPtrWithoutChecks(i, GetImageAddress(element));
+  }
+}
+
+void ImageWriter::FixupInstanceFields(const Object* orig, Object* copy) {
+  DCHECK(orig != NULL);
+  DCHECK(copy != NULL);
+  Class* klass = orig->GetClass();
+  DCHECK(klass != NULL);
+  FixupFields(orig,
+              copy,
+              klass->GetReferenceInstanceOffsets(),
+              false);
+}
+
+void ImageWriter::FixupStaticFields(const Class* orig, Class* copy) {
+  DCHECK(orig != NULL);
+  DCHECK(copy != NULL);
+  FixupFields(orig,
+              copy,
+              orig->GetReferenceStaticOffsets(),
+              true);
+}
+
+void ImageWriter::FixupFields(const Object* orig,
+                              Object* copy,
+                              uint32_t ref_offsets,
+                              bool is_static) {
+  if (ref_offsets != CLASS_WALK_SUPER) {
+    // Found a reference offset bitmap.  Fixup the specified offsets.
+    while (ref_offsets != 0) {
+      size_t right_shift = CLZ(ref_offsets);
+      MemberOffset byte_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
+      const Object* ref = orig->GetFieldObject<const Object*>(byte_offset, false);
+      // Use SetFieldPtr to avoid card marking since we are writing to the image.
+      copy->SetFieldPtr(byte_offset, GetImageAddress(ref), false);
+      ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
+    }
+  } else {
+    // There is no reference offset bitmap.  In the non-static case,
+    // walk up the class inheritance hierarchy and find reference
+    // offsets the hard way. In the static case, just consider this
+    // class.
+    for (const Class *klass = is_static ? orig->AsClass() : orig->GetClass();
+         klass != NULL;
+         klass = is_static ? NULL : klass->GetSuperClass()) {
+      size_t num_reference_fields = (is_static
+                                     ? klass->NumReferenceStaticFields()
+                                     : klass->NumReferenceInstanceFields());
+      for (size_t i = 0; i < num_reference_fields; ++i) {
+        Field* field = (is_static
+                        ? klass->GetStaticField(i)
+                        : klass->GetInstanceField(i));
+        MemberOffset field_offset = field->GetOffset();
+        const Object* ref = orig->GetFieldObject<const Object*>(field_offset, false);
+        // Use SetFieldPtr to avoid card marking since we are writing to the image.
+        copy->SetFieldPtr(field_offset, GetImageAddress(ref), false);
+      }
+    }
+  }
+  if (!is_static && orig->IsReferenceInstance()) {
+    // Fix-up referent, that isn't marked as an object field, for References.
+    Field* field = orig->GetClass()->FindInstanceField("referent", "Ljava/lang/Object;");
+    MemberOffset field_offset = field->GetOffset();
+    const Object* ref = orig->GetFieldObject<const Object*>(field_offset, false);
+    // Use SetFieldPtr to avoid card marking since we are writing to the image.
+    copy->SetFieldPtr(field_offset, GetImageAddress(ref), false);
+  }
+}
+
+static AbstractMethod* GetTargetMethod(const CompilerDriver::PatchInformation* patch)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile());
+  AbstractMethod* method = class_linker->ResolveMethod(patch->GetDexFile(),
+                                               patch->GetTargetMethodIdx(),
+                                               dex_cache,
+                                               NULL,
+                                               NULL,
+                                               patch->GetTargetInvokeType());
+  CHECK(method != NULL)
+    << patch->GetDexFile().GetLocation() << " " << patch->GetTargetMethodIdx();
+  CHECK(!method->IsRuntimeMethod())
+    << patch->GetDexFile().GetLocation() << " " << patch->GetTargetMethodIdx();
+  CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method)
+    << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
+    << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " "
+    << PrettyMethod(method);
+  return method;
+}
+
+void ImageWriter::PatchOatCodeAndMethods() {
+  Thread* self = Thread::Current();
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
+
+  typedef std::vector<const CompilerDriver::PatchInformation*> Patches;
+  const Patches& code_to_patch = compiler_driver_.GetCodeToPatch();
+  for (size_t i = 0; i < code_to_patch.size(); i++) {
+    const CompilerDriver::PatchInformation* patch = code_to_patch[i];
+    AbstractMethod* target = GetTargetMethod(patch);
+    uint32_t code = reinterpret_cast<uint32_t>(class_linker->GetOatCodeFor(target));
+    uint32_t code_base = reinterpret_cast<uint32_t>(&oat_file_->GetOatHeader());
+    uint32_t code_offset = code - code_base;
+    SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetOatAddress(code_offset)));
+  }
+
+  const Patches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
+  for (size_t i = 0; i < methods_to_patch.size(); i++) {
+    const CompilerDriver::PatchInformation* patch = methods_to_patch[i];
+    AbstractMethod* target = GetTargetMethod(patch);
+    SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetImageAddress(target)));
+  }
+
+  // Update the image header with the new checksum after patching
+  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
+  image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum());
+  self->EndAssertNoThreadSuspension(old_cause);
+}
+
+void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  const void* oat_code = class_linker->GetOatCodeFor(patch->GetDexFile(),
+                                                     patch->GetReferrerMethodIdx());
+  OatHeader& oat_header = const_cast<OatHeader&>(oat_file_->GetOatHeader());
+  // TODO: make this Thumb2 specific
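+  // Clear the low bit, which marks Thumb code on ARM, to recover the actual start
+  // of the compiled code before applying the literal offset.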
+  uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uint32_t>(oat_code) & ~0x1);
+  uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset());
+#ifndef NDEBUG
+  const DexFile::MethodId& id = patch->GetDexFile().GetMethodId(patch->GetTargetMethodIdx());
+  uint32_t expected = reinterpret_cast<uint32_t>(&id);
+  uint32_t actual = *patch_location;
+  CHECK(actual == expected || actual == value) << std::hex
+    << "actual=" << actual
+    << "expected=" << expected
+    << "value=" << value;
+#endif
+  *patch_location = value;
+  oat_header.UpdateChecksum(patch_location, sizeof(value));
+}
+
+}  // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
new file mode 100644
index 0000000..9b0d671
--- /dev/null
+++ b/compiler/image_writer.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_IMAGE_WRITER_H_
+#define ART_SRC_IMAGE_WRITER_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+#include <set>
+#include <string>
+
+#include "driver/compiler_driver.h"
+#include "mem_map.h"
+#include "oat_file.h"
+#include "mirror/dex_cache.h"
+#include "os.h"
+#include "safe_map.h"
+#include "gc/space/space.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+// Write a Space built during compilation for use during execution.
+class ImageWriter {
+ public:
+  explicit ImageWriter(const CompilerDriver& compiler_driver)
+      : compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL),
+        oat_data_begin_(NULL), interpreter_to_interpreter_entry_offset_(0),
+        interpreter_to_quick_entry_offset_(0), portable_resolution_trampoline_offset_(0),
+        quick_resolution_trampoline_offset_(0) {}
+
+  ~ImageWriter() {}
+
+  bool Write(const std::string& image_filename,
+             uintptr_t image_begin,
+             const std::string& oat_filename,
+             const std::string& oat_location)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  uintptr_t GetOatDataBegin() {
+    return reinterpret_cast<uintptr_t>(oat_data_begin_);
+  }
+
+ private:
+  bool AllocMemory();
+
+  // we use the lock word to store the offset of the object in the image
+  void AssignImageOffset(mirror::Object* object)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(object != NULL);
+    SetImageOffset(object, image_end_);
+    image_end_ += RoundUp(object->SizeOf(), 8);  // 64-bit alignment
+    DCHECK_LT(image_end_, image_->Size());
+  }
+
+  void SetImageOffset(mirror::Object* object, size_t offset) {
+    DCHECK(object != NULL);
+    DCHECK_NE(offset, 0U);
+    DCHECK(!IsImageOffsetAssigned(object));
+    offsets_.Put(object, offset);
+  }
+
+  bool IsImageOffsetAssigned(const mirror::Object* object) const {
+    DCHECK(object != NULL);
+    return offsets_.find(object) != offsets_.end();
+  }
+
+  size_t GetImageOffset(const mirror::Object* object) const {
+    DCHECK(object != NULL);
+    DCHECK(IsImageOffsetAssigned(object));
+    return offsets_.find(object)->second;
+  }
+
+  mirror::Object* GetImageAddress(const mirror::Object* object) const {
+    if (object == NULL) {
+      return NULL;
+    }
+    return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object));
+  }
+
+  mirror::Object* GetLocalAddress(const mirror::Object* object) const {
+    size_t offset = GetImageOffset(object);
+    byte* dst = image_->Begin() + offset;
+    return reinterpret_cast<mirror::Object*>(dst);
+  }
+
+  const byte* GetOatAddress(uint32_t offset) const {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+    // With Quick, code is within the OatFile, as it is all in one
+    // .o ELF object. However with Portable, the code is always in
+    // different .o ELF objects.
+    DCHECK_LT(offset, oat_file_->Size());
+#endif
+    if (offset == 0) {
+      return NULL;
+    }
+    return oat_data_begin_ + offset;
+  }
+
+  // Returns true if the class was in the original requested image classes list.
+  bool IsImageClass(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Debug aid that lists the requested image classes.
+  void DumpImageClasses();
+
+  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
+  void ComputeLazyFieldsForImageClasses()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
+  void ComputeEagerResolvedStrings();
+  static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Remove unwanted classes from various roots.
+  void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Verify unwanted classes removed.
+  void CheckNonImageClassesRemoved();
+  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Lays out where the image objects will be at runtime.
+  void CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void CalculateNewObjectOffsetsCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Creates the contiguous image in memory and adjusts pointers.
+  void CopyAndFixupObjects();
+  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupClass(const mirror::Class* orig, mirror::Class* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupMethod(const mirror::AbstractMethod* orig, mirror::AbstractMethod* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupObject(const mirror::Object* orig, mirror::Object* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupObjectArray(const mirror::ObjectArray<mirror::Object>* orig,
+                        mirror::ObjectArray<mirror::Object>* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupInstanceFields(const mirror::Object* orig, mirror::Object* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupStaticFields(const mirror::Class* orig, mirror::Class* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupFields(const mirror::Object* orig, mirror::Object* copy, uint32_t ref_offsets,
+                   bool is_static)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Patches references in OatFile to expect runtime addresses.
+  void PatchOatCodeAndMethods()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+
+  const CompilerDriver& compiler_driver_;
+
+  // Map of Object to where it will be at runtime.
+  SafeMap<const mirror::Object*, size_t> offsets_;
+
+  // oat file with code for this image
+  OatFile* oat_file_;
+
+  // Memory mapped for generating the image.
+  UniquePtr<MemMap> image_;
+
+  // Offset to the free space in image_.
+  size_t image_end_;
+
+  // Beginning target image address for the output image.
+  byte* image_begin_;
+
+  // Beginning target oat address for the pointers from the output image to its oat file.
+  const byte* oat_data_begin_;
+
+  // Offset from oat_data_begin_ to the stubs.
+  uint32_t interpreter_to_interpreter_entry_offset_;
+  uint32_t interpreter_to_quick_entry_offset_;
+  uint32_t portable_resolution_trampoline_offset_;
+  uint32_t quick_resolution_trampoline_offset_;
+
+  // DexCaches seen while scanning for fixing up CodeAndDirectMethods
+  typedef std::set<mirror::DexCache*> Set;
+  Set dex_caches_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_IMAGE_WRITER_H_
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
new file mode 100644
index 0000000..560a146
--- /dev/null
+++ b/compiler/jni/jni_compiler_test.cc
@@ -0,0 +1,777 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "common_test.h"
+#include "dex_file.h"
+#include "gtest/gtest.h"
+#include "indirect_reference_table.h"
+#include "jni_internal.h"
+#include "mem_map.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/stack_trace_element.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+#include "UniquePtr.h"
+
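+// Native implementations that the runtime resolves by name when the tests call the
+// JNI stubs without registering them explicitly (see the *ThroughStub tests below).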
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar(JNIEnv*, jobject, jint count) {
+  return count + 1;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar(JNIEnv*, jclass, jint count) {
+  return count + 1;
+}
+
+namespace art {
+
+class JniCompilerTest : public CommonTest {
+ protected:
+  void CompileForTest(jobject class_loader, bool direct,
+                      const char* method_name, const char* method_sig) {
+    ScopedObjectAccess soa(Thread::Current());
+    // Compile the native method before starting the runtime
+    mirror::Class* c = class_linker_->FindClass("LMyClassNatives;",
+                                                soa.Decode<mirror::ClassLoader*>(class_loader));
+    mirror::AbstractMethod* method;
+    if (direct) {
+      method = c->FindDirectMethod(method_name, method_sig);
+    } else {
+      method = c->FindVirtualMethod(method_name, method_sig);
+    }
+    ASSERT_TRUE(method != NULL) << method_name << " " << method_sig;
+    if (method->GetEntryPointFromCompiledCode() != NULL) {
+      return;
+    }
+    CompileMethod(method);
+    ASSERT_TRUE(method->GetEntryPointFromCompiledCode() != NULL) << method_name << " " << method_sig;
+  }
+
+  void SetUpForTest(bool direct, const char* method_name, const char* method_sig,
+                    void* native_fnptr) {
+    // Initialize class loader and compile method when runtime not started.
+    if (!runtime_->IsStarted()) {
+      {
+        ScopedObjectAccess soa(Thread::Current());
+        class_loader_ = LoadDex("MyClassNatives");
+      }
+      CompileForTest(class_loader_, direct, method_name, method_sig);
+      // Start runtime.
+      Thread::Current()->TransitionFromSuspendedToRunnable();
+      bool started = runtime_->Start();
+      CHECK(started);
+    }
+    // JNI operations after runtime start.
+    env_ = Thread::Current()->GetJniEnv();
+    jklass_ = env_->FindClass("MyClassNatives");
+    ASSERT_TRUE(jklass_ != NULL) << method_name << " " << method_sig;
+
+    if (direct) {
+      jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig);
+    } else {
+      jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig);
+    }
+    ASSERT_TRUE(jmethod_ != NULL) << method_name << " " << method_sig;
+
+    if (native_fnptr != NULL) {
+      JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } };
+      ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1))
+              << method_name << " " << method_sig;
+    } else {
+      env_->UnregisterNatives(jklass_);
+    }
+
+    jmethodID constructor = env_->GetMethodID(jklass_, "<init>", "()V");
+    jobj_ = env_->NewObject(jklass_, constructor);
+    ASSERT_TRUE(jobj_ != NULL) << method_name << " " << method_sig;
+  }
+
+ public:
+  static jclass jklass_;
+  static jobject jobj_;
+  static jobject class_loader_;
+
+
+ protected:
+  JNIEnv* env_;
+  jmethodID jmethod_;
+};
+
+jclass JniCompilerTest::jklass_;
+jobject JniCompilerTest::jobj_;
+jobject JniCompilerTest::class_loader_;
+
+int gJava_MyClassNatives_foo_calls = 0;
+void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
+  // 1 = thisObj
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  Locks::mutator_lock_->AssertNotHeld(Thread::Current());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_foo_calls++;
+}
+
+TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "foo", "()V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_foo));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_foo_calls);
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "bar", "(I)I",
+               NULL /* calling through stub will link with &Java_MyClassNatives_bar */);
+
+  ScopedObjectAccess soa(Thread::Current());
+  std::string reason;
+  ASSERT_TRUE(
+      Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode<mirror::ClassLoader*>(class_loader_),
+                                                         reason)) << reason;
+
+  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
+  EXPECT_EQ(25, result);
+}
+
+TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "sbar", "(I)I",
+               NULL /* calling through stub will link with &Java_MyClassNatives_sbar */);
+
+  ScopedObjectAccess soa(Thread::Current());
+  std::string reason;
+  ASSERT_TRUE(
+      Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", soa.Decode<mirror::ClassLoader*>(class_loader_),
+                                                         reason)) << reason;
+
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
+  EXPECT_EQ(43, result);
+}
+
+int gJava_MyClassNatives_fooI_calls = 0;
+jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) {
+  // 1 = thisObj
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_fooI_calls++;
+  return x;
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooI", "(I)I",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooI));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooI_calls);
+  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 42);
+  EXPECT_EQ(42, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooI_calls);
+  result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFED00D);
+  EXPECT_EQ(static_cast<jint>(0xCAFED00D), result);
+  EXPECT_EQ(2, gJava_MyClassNatives_fooI_calls);
+}
+
+int gJava_MyClassNatives_fooII_calls = 0;
+jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
+  // 1 = thisObj
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_fooII_calls++;
+  return x - y;  // non-commutative operator
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooII", "(II)I",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooII));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooII_calls);
+  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 99, 10);
+  EXPECT_EQ(99 - 10, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooII_calls);
+  result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFEBABE,
+                                         0xCAFED00D);
+  EXPECT_EQ(static_cast<jint>(0xCAFEBABE - 0xCAFED00D), result);
+  EXPECT_EQ(2, gJava_MyClassNatives_fooII_calls);
+}
+
+int gJava_MyClassNatives_fooJJ_calls = 0;
+jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) {
+  // 1 = thisObj
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_fooJJ_calls++;
+  return x - y;  // non-commutative operator
+}
+
+TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooJJ", "(JJ)J",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_calls);
+  jlong a = 0x1234567890ABCDEFll;
+  jlong b = 0xFEDCBA0987654321ll;
+  jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b);
+  EXPECT_EQ(a - b, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_calls);
+  result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, b, a);
+  EXPECT_EQ(b - a, result);
+  EXPECT_EQ(2, gJava_MyClassNatives_fooJJ_calls);
+}
+
+int gJava_MyClassNatives_fooDD_calls = 0;
+jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) {
+  // 1 = thisObj
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_fooDD_calls++;
+  return x - y;  // non-commutative operator
+}
+
+TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooDD", "(DD)D",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooDD));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooDD_calls);
+  jdouble result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_,
+                                                    99.0, 10.0);
+  EXPECT_EQ(99.0 - 10.0, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooDD_calls);
+  jdouble a = 3.14159265358979323846;
+  jdouble b = 0.69314718055994530942;
+  result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b);
+  EXPECT_EQ(a - b, result);
+  EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls);
+}
+
+int gJava_MyClassNatives_fooJJ_synchronized_calls = 0;
+jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong x, jlong y) {
+  // 1 = thisObj
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_fooJJ_synchronized_calls++;
+  return x | y;
+}
+
+TEST_F(JniCompilerTest, CompileAndRun_fooJJ_synchronized) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooJJ_synchronized", "(JJ)J",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ_synchronized));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls);
+  jlong a = 0x1000000020000000ULL;
+  jlong b = 0x00ff000000aa0000ULL;
+  jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b);
+  EXPECT_EQ(a | b, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_synchronized_calls);
+}
+
+int gJava_MyClassNatives_fooIOO_calls = 0;
+jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y,
+                            jobject z) {
+  // 3 = this + y + z
+  EXPECT_EQ(3U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  gJava_MyClassNatives_fooIOO_calls++;
+  switch (x) {
+    case 1:
+      return y;
+    case 2:
+      return z;
+    default:
+      return thisObj;
+  }
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooIOO",
+               "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooIOO));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooIOO_calls);
+  jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, NULL, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(1, gJava_MyClassNatives_fooIOO_calls);
+
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, NULL, jklass_);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(2, gJava_MyClassNatives_fooIOO_calls);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, NULL, jklass_);
+  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  EXPECT_EQ(3, gJava_MyClassNatives_fooIOO_calls);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, NULL, jklass_);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(4, gJava_MyClassNatives_fooIOO_calls);
+
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(5, gJava_MyClassNatives_fooIOO_calls);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(6, gJava_MyClassNatives_fooIOO_calls);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls);
+}
+
+int gJava_MyClassNatives_fooSII_calls = 0;
+jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) {
+  // 1 = klass
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
+  gJava_MyClassNatives_fooSII_calls++;
+  return x + y;
+}
+
+TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "fooSII", "(II)I",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooSII));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooSII_calls);
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 20, 30);
+  EXPECT_EQ(50, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooSII_calls);
+}
+
+int gJava_MyClassNatives_fooSDD_calls = 0;
+jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble y) {
+  // 1 = klass
+  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
+  gJava_MyClassNatives_fooSDD_calls++;
+  return x - y;  // non-commutative operator
+}
+
+TEST_F(JniCompilerTest, CompileAndRunStaticDoubleDoubleMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "fooSDD", "(DD)D",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooSDD));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooSDD_calls);
+  jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 99.0, 10.0);
+  EXPECT_EQ(99.0 - 10.0, result);
+  EXPECT_EQ(1, gJava_MyClassNatives_fooSDD_calls);
+  jdouble a = 3.14159265358979323846;
+  jdouble b = 0.69314718055994530942;
+  result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b);
+  EXPECT_EQ(a - b, result);
+  EXPECT_EQ(2, gJava_MyClassNatives_fooSDD_calls);
+}
+
+int gJava_MyClassNatives_fooSIOO_calls = 0;
+jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y,
+                             jobject z) {
+  // 3 = klass + y + z
+  EXPECT_EQ(3U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
+  gJava_MyClassNatives_fooSIOO_calls++;
+  switch (x) {
+    case 1:
+      return y;
+    case 2:
+      return z;
+    default:
+      return klass;
+  }
+}
+
+
+TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "fooSIOO",
+               "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooSIOO));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooSIOO_calls);
+  jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(1, gJava_MyClassNatives_fooSIOO_calls);
+
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(2, gJava_MyClassNatives_fooSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, NULL, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  EXPECT_EQ(3, gJava_MyClassNatives_fooSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, NULL, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(4, gJava_MyClassNatives_fooSIOO_calls);
+
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(5, gJava_MyClassNatives_fooSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(6, gJava_MyClassNatives_fooSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls);
+}
+
+int gJava_MyClassNatives_fooSSIOO_calls = 0;
+jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) {
+  // 3 = klass + y + z
+  EXPECT_EQ(3U, Thread::Current()->NumStackReferences());
+  EXPECT_EQ(kNative, Thread::Current()->GetState());
+  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
+  gJava_MyClassNatives_fooSSIOO_calls++;
+  switch (x) {
+    case 1:
+      return y;
+    case 2:
+      return z;
+    default:
+      return klass;
+  }
+}
+
+TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "fooSSIOO",
+               "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooSSIOO));
+
+  EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls);
+  jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(1, gJava_MyClassNatives_fooSSIOO_calls);
+
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(2, gJava_MyClassNatives_fooSSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, NULL, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  EXPECT_EQ(3, gJava_MyClassNatives_fooSSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, NULL, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(4, gJava_MyClassNatives_fooSSIOO_calls);
+
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jklass_, result));
+  EXPECT_EQ(5, gJava_MyClassNatives_fooSSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(jobj_, result));
+  EXPECT_EQ(6, gJava_MyClassNatives_fooSSIOO_calls);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, NULL);
+  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls);
+}
+
+void Java_MyClassNatives_throwException(JNIEnv* env, jobject) {
+  jclass c = env->FindClass("java/lang/RuntimeException");
+  env->ThrowNew(c, "hello");
+}
+
+TEST_F(JniCompilerTest, ExceptionHandling) {
+  TEST_DISABLED_FOR_PORTABLE();
+  {
+    ASSERT_FALSE(runtime_->IsStarted());
+    ScopedObjectAccess soa(Thread::Current());
+    class_loader_ = LoadDex("MyClassNatives");
+
+    // all compilation needs to happen before Runtime::Start
+    CompileForTest(class_loader_, false, "foo", "()V");
+    CompileForTest(class_loader_, false, "throwException", "()V");
+    CompileForTest(class_loader_, false, "foo", "()V");
+  }
+  // Start runtime to avoid re-initialization in SetUpForTest.
+  Thread::Current()->TransitionFromSuspendedToRunnable();
+  bool started = runtime_->Start();
+  CHECK(started);
+
+  gJava_MyClassNatives_foo_calls = 0;
+
+  // Check a single call of a JNI method is ok
+  SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
+  EXPECT_FALSE(Thread::Current()->IsExceptionPending());
+
+  // Get class for exception we expect to be thrown
+  ScopedLocalRef<jclass> jlre(env_, env_->FindClass("java/lang/RuntimeException"));
+  SetUpForTest(false, "throwException", "()V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_throwException));
+  // Call Java_MyClassNatives_throwException (JNI method that throws exception)
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
+  EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
+  ScopedLocalRef<jthrowable> exception(env_, env_->ExceptionOccurred());
+  env_->ExceptionClear();
+  EXPECT_TRUE(env_->IsInstanceOf(exception.get(), jlre.get()));
+
+  // Check a single call of a JNI method is ok
+  SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+}
+
+jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
+  if (i <= 0) {
+    // We want to check raw Object*/Array* below
+    ScopedObjectAccess soa(env);
+
+    // Build stack trace
+    jobject internal = Thread::Current()->CreateInternalStackTrace(soa);
+    jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal);
+    mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
+        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
+    EXPECT_TRUE(trace_array != NULL);
+    EXPECT_EQ(11, trace_array->GetLength());
+
+    // Check stack trace entries have expected values
+    for (int32_t i = 0; i < trace_array->GetLength(); ++i) {
+      EXPECT_EQ(-2, trace_array->Get(i)->GetLineNumber());
+      mirror::StackTraceElement* ste = trace_array->Get(i);
+      EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str());
+      EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str());
+      EXPECT_STREQ("fooI", ste->GetMethodName()->ToModifiedUtf8().c_str());
+    }
+
+    // end recursion
+    return 0;
+  } else {
+    jclass jklass = env->FindClass("MyClassNatives");
+    EXPECT_TRUE(jklass != NULL);
+    jmethodID jmethod = env->GetMethodID(jklass, "fooI", "(I)I");
+    EXPECT_TRUE(jmethod != NULL);
+
+    // Recurse with i - 1
+    jint result = env->CallNonvirtualIntMethod(thisObj, jklass, jmethod, i - 1);
+
+    // Return sum of all depths
+    return i + result;
+  }
+}
+
+TEST_F(JniCompilerTest, NativeStackTraceElement) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooI", "(I)I",
+               reinterpret_cast<void*>(&Java_MyClassNatives_nativeUpCall));
+  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 10);
+  EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result);
+}
+
+jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) {
+  return env->NewGlobalRef(x);
+}
+
+TEST_F(JniCompilerTest, ReturnGlobalRef) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooO));
+  jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, jobj_);
+  EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(result));
+  EXPECT_TRUE(env_->IsSameObject(result, jobj_));
+}
+
+jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
+  // Add 10 local references
+  ScopedObjectAccess soa(env);
+  for (int i = 0; i < 10; i++) {
+    soa.AddLocalReference<jobject>(soa.Decode<mirror::Object*>(thisObj));
+  }
+  return x+1;
+}
+
+TEST_F(JniCompilerTest, LocalReferenceTableClearingTest) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "fooI", "(I)I", reinterpret_cast<void*>(&local_ref_test));
+  // 1000 invocations of a method that adds 10 local references
+  for (int i = 0; i < 1000; i++) {
+    jint result = env_->CallIntMethod(jobj_, jmethod_, i);
+    EXPECT_TRUE(result == i + 1);
+  }
+}
+
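+// Native hook whose parameter list matches java.lang.System.arraycopy; the test below registers it
+// on MyClassNatives.arraycopy to check the marshalling of mixed object and int arguments.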
+void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject dst, jint dst_pos, jint length) {
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, klass));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, dst));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, src));
+  EXPECT_EQ(1234, src_pos);
+  EXPECT_EQ(5678, dst_pos);
+  EXPECT_EQ(9876, length);
+}
+
+TEST_F(JniCompilerTest, JavaLangSystemArrayCopy) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
+               reinterpret_cast<void*>(&my_arraycopy));
+  env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876);
+}
+
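+// Native hook whose signature matches sun.misc.Unsafe.compareAndSwapInt; checks that a mix of
+// object, long and int arguments arrives intact.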
+jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint expected, jint newval) {
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, unsafe));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj));
+  EXPECT_EQ(0x12345678ABCDEF88ll, offset);
+  EXPECT_EQ(static_cast<jint>(0xCAFEF00D), expected);
+  EXPECT_EQ(static_cast<jint>(0xEBADF00D), newval);
+  return JNI_TRUE;
+}
+
+TEST_F(JniCompilerTest, CompareAndSwapInt) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
+               reinterpret_cast<void*>(&my_casi));
+  jboolean result = env_->CallBooleanMethod(jobj_, jmethod_, jobj_, 0x12345678ABCDEF88ll, 0xCAFEF00D, 0xEBADF00D);
+  EXPECT_EQ(result, JNI_TRUE);
+}
+
+jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, jobject obj2) {
+  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
+  EXPECT_EQ(0x12345678ABCDEF88ll, val1);
+  EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+  return 42;
+}
+
+TEST_F(JniCompilerTest, GetText) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
+               reinterpret_cast<void*>(&my_gettext));
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+                                          0x7FEDCBA987654321ll, jobj_);
+  EXPECT_EQ(result, 42);
+}
+
+TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;", NULL);
+  // This space intentionally left blank. Just testing compilation succeeds.
+}
+
+// This should return jclass, but we're imitating a bug pattern.
+jobject Java_MyClassNatives_instanceMethodThatShouldReturnClass(JNIEnv* env, jobject) {
+  return env->NewStringUTF("not a class!");
+}
+
+// This should return jclass, but we're imitating a bug pattern.
+jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclass) {
+  return env->NewStringUTF("not a class!");
+}
+
+TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Instance) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldReturnClass));
+
+  CheckJniAbortCatcher check_jni_abort_catcher;
+  // TODO: check type of returns with portable JNI compiler.
+  // This native method is bad, and tries to return a jstring as a jclass.
+  env_->CallObjectMethod(jobj_, jmethod_);
+  check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
+
+  // Here, we just call the method incorrectly; we should catch that too.
+  env_->CallVoidMethod(jobj_, jmethod_);
+  check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
+  env_->CallStaticVoidMethod(jklass_, jmethod_);
+  check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticVoidMethodV");
+}
+
+TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Static) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldReturnClass));
+
+  CheckJniAbortCatcher check_jni_abort_catcher;
+  // TODO: check type of returns with portable JNI compiler.
+  // This native method is bad, and tries to return a jstring as a jclass.
+  env_->CallStaticObjectMethod(jklass_, jmethod_);
+  check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
+
+  // Here, we just call the method incorrectly; we should catch that too.
+  env_->CallStaticVoidMethod(jklass_, jmethod_);
+  check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
+  env_->CallVoidMethod(jobj_, jmethod_);
+  check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallVoidMethodV");
+}
+
+// This should take jclass, but we're imitating a bug pattern.
+void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jclass) {
+}
+
+// This should take jclass, but we're imitating a bug pattern.
+void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass) {
+}
+
+TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Instance) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldTakeClass));
+
+  CheckJniAbortCatcher check_jni_abort_catcher;
+  // We deliberately pass a bad second argument here.
+  env_->CallVoidMethod(jobj_, jmethod_, 123, env_->NewStringUTF("not a class!"));
+  check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.instanceMethodThatShouldTakeClass(int, java.lang.Class)");
+}
+
+TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Static) {
+  TEST_DISABLED_FOR_PORTABLE();
+  SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldTakeClass));
+
+  CheckJniAbortCatcher check_jni_abort_catcher;
+  // We deliberately pass a bad second argument here.
+  env_->CallStaticVoidMethod(jklass_, jmethod_, 123, env_->NewStringUTF("not a class!"));
+  check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.staticMethodThatShouldTakeClass(int, java.lang.Class)");
+}
+
+}  // namespace art
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
new file mode 100644
index 0000000..44d0c2d
--- /dev/null
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_compiler.h"
+
+#include "base/logging.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "llvm/compiler_llvm.h"
+#include "llvm/ir_builder.h"
+#include "llvm/llvm_compilation_unit.h"
+#include "llvm/runtime_support_llvm_func.h"
+#include "llvm/utils_llvm.h"
+#include "mirror/abstract_method.h"
+#include "runtime.h"
+#include "stack.h"
+#include "thread.h"
+
+#include <llvm/ADT/SmallVector.h>
+#include <llvm/IR/BasicBlock.h>
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/Type.h>
+
+namespace art {
+namespace llvm {
+
+using namespace runtime_support;
+
+JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
+                         const CompilerDriver& driver,
+                         const DexCompilationUnit* dex_compilation_unit)
+: cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
+  context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
+  dex_compilation_unit_(dex_compilation_unit),
+  func_(NULL), elf_func_idx_(0) {
+
+  // Check: ensure that the JNI compiler only gets "native" methods.
+  CHECK(dex_compilation_unit->IsNative());
+}
+
+
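+// Builds an LLVM function that wraps the native method: it pushes a shadow frame, copies reference
+// arguments into the SIRT, brackets the native call with JniMethodStart/JniMethodEnd, and pops the
+// shadow frame before returning.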
+CompiledMethod* JniCompiler::Compile() {
+  const bool is_static = dex_compilation_unit_->IsStatic();
+  const bool is_synchronized = dex_compilation_unit_->IsSynchronized();
+  const DexFile* dex_file = dex_compilation_unit_->GetDexFile();
+  DexFile::MethodId const& method_id =
+      dex_file->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
+  char const return_shorty = dex_file->GetMethodShorty(method_id)[0];
+  ::llvm::Value* this_object_or_class_object;
+
+  uint32_t method_idx = dex_compilation_unit_->GetDexMethodIndex();
+  std::string func_name(StringPrintf("jni_%s",
+                                     MangleForJni(PrettyMethod(method_idx, *dex_file)).c_str()));
+  CreateFunction(func_name);
+
+  // Set argument name
+  ::llvm::Function::arg_iterator arg_begin(func_->arg_begin());
+  ::llvm::Function::arg_iterator arg_end(func_->arg_end());
+  ::llvm::Function::arg_iterator arg_iter(arg_begin);
+
+  DCHECK_NE(arg_iter, arg_end);
+  arg_iter->setName("method");
+  ::llvm::Value* method_object_addr = arg_iter++;
+
+  if (!is_static) {
+    // Non-static, the second argument is "this object"
+    this_object_or_class_object = arg_iter++;
+  } else {
+    // Load class object
+    this_object_or_class_object =
+        irb_.LoadFromObjectOffset(method_object_addr,
+                                  mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
+                                  irb_.getJObjectTy(),
+                                  kTBAAConstJObject);
+  }
+  // Actual arguments (ignore method and this object)
+  arg_begin = arg_iter;
+
+  // Count the number of Object* arguments
+  uint32_t sirt_size = 1;
+  // "this" object pointer for non-static
+  // "class" object pointer for static
+  for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
+#if !defined(NDEBUG)
+    arg_iter->setName(StringPrintf("a%u", i));
+#endif
+    if (arg_iter->getType() == irb_.getJObjectTy()) {
+      ++sirt_size;
+    }
+  }
+
+  // Shadow stack
+  ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(sirt_size);
+  ::llvm::AllocaInst* shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
+
+  // Store the dex pc
+  irb_.StoreToObjectOffset(shadow_frame_,
+                           ShadowFrame::DexPCOffset(),
+                           irb_.getInt32(DexFile::kDexNoIndex),
+                           kTBAAShadowFrame);
+
+  // Push the shadow frame
+  ::llvm::Value* shadow_frame_upcast = irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
+  ::llvm::Value* old_shadow_frame =
+      irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, sirt_size);
+
+  // Get JNIEnv
+  ::llvm::Value* jni_env_object_addr =
+      irb_.Runtime().EmitLoadFromThreadOffset(Thread::JniEnvOffset().Int32Value(),
+                                              irb_.getJObjectTy(),
+                                              kTBAARuntimeInfo);
+
+  // Get callee code_addr
+  ::llvm::Value* code_addr =
+      irb_.LoadFromObjectOffset(method_object_addr,
+                                mirror::AbstractMethod::NativeMethodOffset().Int32Value(),
+                                GetFunctionType(dex_compilation_unit_->GetDexMethodIndex(),
+                                                is_static, true)->getPointerTo(),
+                                kTBAARuntimeInfo);
+
+  // Load actual parameters
+  std::vector< ::llvm::Value*> args;
+
+  // The 1st parameter: JNIEnv*
+  args.push_back(jni_env_object_addr);
+
+  // Variables for GetElementPtr
+  ::llvm::Value* gep_index[] = {
+    irb_.getInt32(0), // No displacement for shadow frame pointer
+    irb_.getInt32(1), // SIRT
+    NULL,
+  };
+
+  size_t sirt_member_index = 0;
+
+  // Store the "this object or class object" to SIRT
+  gep_index[2] = irb_.getInt32(sirt_member_index++);
+  ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+                                                    irb_.getJObjectTy()->getPointerTo());
+  irb_.CreateStore(this_object_or_class_object, sirt_field_addr, kTBAAShadowFrame);
+  // Push the "this object or class object" to out args
+  this_object_or_class_object = irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy());
+  args.push_back(this_object_or_class_object);
+  // Store arguments to SIRT, and push back to args
+  for (arg_iter = arg_begin; arg_iter != arg_end; ++arg_iter) {
+    if (arg_iter->getType() == irb_.getJObjectTy()) {
+      // Store the reference type arguments to SIRT
+      gep_index[2] = irb_.getInt32(sirt_member_index++);
+      ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+                                                        irb_.getJObjectTy()->getPointerTo());
+      irb_.CreateStore(arg_iter, sirt_field_addr, kTBAAShadowFrame);
+      // Note null is placed in the SIRT but the jobject passed to the native code must be null
+      // (not a pointer into the SIRT as with regular references).
+      ::llvm::Value* equal_null = irb_.CreateICmpEQ(arg_iter, irb_.getJNull());
+      ::llvm::Value* arg =
+          irb_.CreateSelect(equal_null,
+                            irb_.getJNull(),
+                            irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy()));
+      args.push_back(arg);
+    } else {
+      args.push_back(arg_iter);
+    }
+  }
+
+  ::llvm::Value* saved_local_ref_cookie;
+  { // JniMethodStart
+    RuntimeId func_id = is_synchronized ? JniMethodStartSynchronized
+                                        : JniMethodStart;
+    ::llvm::SmallVector< ::llvm::Value*, 2> args;
+    if (is_synchronized) {
+      args.push_back(this_object_or_class_object);
+    }
+    args.push_back(irb_.Runtime().EmitGetCurrentThread());
+    saved_local_ref_cookie =
+        irb_.CreateCall(irb_.GetRuntime(func_id), args);
+  }
+
+  // Call!!!
+  ::llvm::Value* retval = irb_.CreateCall(code_addr, args);
+
+  { // JniMethodEnd
+    bool is_return_ref = return_shorty == 'L';
+    RuntimeId func_id =
+        is_return_ref ? (is_synchronized ? JniMethodEndWithReferenceSynchronized
+                                         : JniMethodEndWithReference)
+                      : (is_synchronized ? JniMethodEndSynchronized
+                                         : JniMethodEnd);
+    ::llvm::SmallVector< ::llvm::Value*, 4> args;
+    if (is_return_ref) {
+      args.push_back(retval);
+    }
+    args.push_back(saved_local_ref_cookie);
+    if (is_synchronized) {
+      args.push_back(this_object_or_class_object);
+    }
+    args.push_back(irb_.Runtime().EmitGetCurrentThread());
+
+    ::llvm::Value* decoded_jobject =
+        irb_.CreateCall(irb_.GetRuntime(func_id), args);
+
+    // Return decoded jobject if return reference.
+    if (is_return_ref) {
+      retval = decoded_jobject;
+    }
+  }
+
+  // Pop the shadow frame
+  irb_.Runtime().EmitPopShadowFrame(old_shadow_frame);
+
+  // Return!
+  if (return_shorty != 'V') {
+    irb_.CreateRet(retval);
+  } else {
+    irb_.CreateRetVoid();
+  }
+
+  // Verify the generated bitcode
+  VERIFY_LLVM_FUNCTION(*func_);
+
+  cunit_->Materialize();
+
+  return new CompiledMethod(cunit_->GetInstructionSet(),
+                            cunit_->GetElfObject(),
+                            func_name);
+}
+
+
+void JniCompiler::CreateFunction(const std::string& func_name) {
+  CHECK_NE(0U, func_name.size());
+
+  const bool is_static = dex_compilation_unit_->IsStatic();
+
+  // Get function type
+  ::llvm::FunctionType* func_type =
+    GetFunctionType(dex_compilation_unit_->GetDexMethodIndex(), is_static, false);
+
+  // Create function
+  func_ = ::llvm::Function::Create(func_type, ::llvm::Function::InternalLinkage,
+                                   func_name, module_);
+
+  // Create basic block
+  ::llvm::BasicBlock* basic_block = ::llvm::BasicBlock::Create(*context_, "B0", func_);
+
+  // Set insert point
+  irb_.SetInsertPoint(basic_block);
+}
+
+
+::llvm::FunctionType* JniCompiler::GetFunctionType(uint32_t method_idx,
+                                                   bool is_static, bool is_native_function) {
+  // Get method signature
+  uint32_t shorty_size;
+  const char* shorty = dex_compilation_unit_->GetShorty(&shorty_size);
+  CHECK_GE(shorty_size, 1u);
+
+  // Get return type
+  ::llvm::Type* ret_type = irb_.getJType(shorty[0]);
+
+  // Get argument type
+  std::vector< ::llvm::Type*> args_type;
+
+  args_type.push_back(irb_.getJObjectTy()); // method object pointer
+
+  if (!is_static || is_native_function) {
+    // "this" object pointer for non-static
+    // "class" object pointer for static native
+    args_type.push_back(irb_.getJType('L'));
+  }
+
+  for (uint32_t i = 1; i < shorty_size; ++i) {
+    args_type.push_back(irb_.getJType(shorty[i]));
+  }
+
+  return ::llvm::FunctionType::get(ret_type, args_type, false);
+}
+
+}  // namespace llvm
+}  // namespace art
diff --git a/compiler/jni/portable/jni_compiler.h b/compiler/jni/portable/jni_compiler.h
new file mode 100644
index 0000000..a04277c
--- /dev/null
+++ b/compiler/jni/portable/jni_compiler.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_JNI_PORTABLE_JNI_COMPILER_H_
+#define ART_SRC_COMPILER_JNI_PORTABLE_JNI_COMPILER_H_
+
+#include <stdint.h>
+
+#include <string>
+
+namespace art {
+  class ClassLinker;
+  class CompiledMethod;
+  class CompilerDriver;
+  class DexFile;
+  class DexCompilationUnit;
+  namespace mirror {
+    class AbstractMethod;
+    class ClassLoader;
+    class DexCache;
+  }  // namespace mirror
+}  // namespace art
+
+namespace llvm {
+  class AllocaInst;
+  class Function;
+  class FunctionType;
+  class BasicBlock;
+  class LLVMContext;
+  class Module;
+  class Type;
+  class Value;
+}  // namespace llvm
+
+namespace art {
+namespace llvm {
+
+class LlvmCompilationUnit;
+class IRBuilder;
+
+class JniCompiler {
+ public:
+  JniCompiler(LlvmCompilationUnit* cunit,
+              const CompilerDriver& driver,
+              const DexCompilationUnit* dex_compilation_unit);
+
+  CompiledMethod* Compile();
+
+ private:
+  void CreateFunction(const std::string& symbol);
+
+  ::llvm::FunctionType* GetFunctionType(uint32_t method_idx,
+                                        bool is_static, bool is_target_function);
+
+ private:
+  LlvmCompilationUnit* cunit_;
+  const CompilerDriver* const driver_;
+
+  ::llvm::Module* module_;
+  ::llvm::LLVMContext* context_;
+  IRBuilder& irb_;
+
+  const DexCompilationUnit* const dex_compilation_unit_;
+
+  ::llvm::Function* func_;
+  uint16_t elf_func_idx_;
+};
+
+
+}  // namespace llvm
+}  // namespace art
+
+
+#endif  // ART_SRC_COMPILER_JNI_PORTABLE_JNI_COMPILER_H_
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
new file mode 100644
index 0000000..e9b09c5
--- /dev/null
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "calling_convention_arm.h"
+#include "oat/utils/arm/managed_register_arm.h"
+
+namespace art {
+namespace arm {
+
+// Calling convention
+
+ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return ArmManagedRegister::FromCoreRegister(IP);  // R12
+}
+
+ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() {
+  return ArmManagedRegister::FromCoreRegister(IP);  // R12
+}
+
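+// Maps the method's return shorty to the register(s) that hold the result: R0 for 32-bit values
+// (floats included, since results come back in core registers here) and the R0/R1 pair for longs
+// and doubles.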
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F') {
+    return ArmManagedRegister::FromCoreRegister(R0);
+  } else if (shorty[0] == 'D') {
+    return ArmManagedRegister::FromRegisterPair(R0_R1);
+  } else if (shorty[0] == 'J') {
+    return ArmManagedRegister::FromRegisterPair(R0_R1);
+  } else if (shorty[0] == 'V') {
+    return ArmManagedRegister::NoRegister();
+  } else {
+    return ArmManagedRegister::FromCoreRegister(R0);
+  }
+}
+
+ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister ArmJniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
+  return ArmManagedRegister::FromCoreRegister(R0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
+  return ArmManagedRegister::FromCoreRegister(R0);
+}
+
+bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything moved to stack on entry.
+}
+
+bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;
+}
+
+ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +   // displacement
+                  kPointerSize +                 // Method*
+                  (itr_slots_ * kPointerSize));  // offset into in args
+  return result;
+}
+
+const std::vector<ManagedRegister>& ArmManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on ARM to free them up for scratch use; we then assume
+  // all arguments are on the stack.
+  if (entry_spills_.size() == 0) {
+    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    if (num_spills > 0) {
+      entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1));
+      if (num_spills > 1) {
+        entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2));
+        if (num_spills > 2) {
+          entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3));
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+// JNI calling convention
+
+ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
+  // or jclass for static methods and the JNIEnv. We start at the aligned register r2.
+  size_t padding = 0;
+  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+    if (IsParamALongOrDouble(cur_arg)) {
+      if ((cur_reg & 1) != 0) {
+        padding += 4;
+        cur_reg++;  // additional bump to ensure alignment
+      }
+      cur_reg++;  // additional bump to skip extra long word
+    }
+    cur_reg++;  // bump the iterator for every argument
+  }
+  padding_ = padding;
+
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R5));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R6));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R7));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R8));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R10));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R11));
+}
+
+uint32_t ArmJniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor
+  uint32_t result = 0;
+  result =  1 << R5 | 1 << R6 | 1 << R7 | 1 << R8 | 1 << R10 | 1 << R11 | 1 << LR;
+  return result;
+}
+
+ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
+  return ArmManagedRegister::FromCoreRegister(R2);
+}
+
+size_t ArmJniCallingConvention::FrameSize() {
+  // Method*, LR and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t ArmJniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize + padding_,
+                 kStackAlignment);
+}
+
+// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
+// in even register numbers and stack slots
+void ArmJniCallingConvention::Next() {
+  JniCallingConvention::Next();
+  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) &&
+      (arg_pos < NumArgs()) &&
+      IsParamALongOrDouble(arg_pos)) {
+    // itr_slots_ needs to be an even number, according to AAPCS.
+    if ((itr_slots_ & 0x1u) != 0) {
+      itr_slots_++;
+    }
+  }
+}
+
+bool ArmJniCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 4;
+}
+
+bool ArmJniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+static const Register kJniArgumentRegisters[] = {
+  R0, R1, R2, R3
+};
+ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
+  CHECK_LT(itr_slots_, 4u);
+  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+    CHECK_EQ(itr_slots_, 2u);
+    return ArmManagedRegister::FromRegisterPair(R2_R3);
+  } else {
+    return
+      ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+  }
+}
+
+FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
+  CHECK_GE(itr_slots_, 4u);
+  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kPointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t ArmJniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv* less arguments in registers
+  return static_args + param_args + 1 - 4;
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
new file mode 100644
index 0000000..3787d45
--- /dev/null
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
+#define ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace arm {
+
+class ArmManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~ArmManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+
+ private:
+  std::vector<ManagedRegister> entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
+};
+
+class ArmJniCallingConvention : public JniCallingConvention {
+ public:
+  explicit ArmJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~ArmJniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister IntReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual void Next();  // Override default behavior for AAPCS
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split in AAPCS
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmJniCallingConvention);
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
new file mode 100644
index 0000000..5d5eaf2
--- /dev/null
+++ b/compiler/jni/quick/calling_convention.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention.h"
+
+#include "base/logging.h"
+#include "jni/quick/arm/calling_convention_arm.h"
+#include "jni/quick/mips/calling_convention_mips.h"
+#include "jni/quick/x86/calling_convention_x86.h"
+#include "utils.h"
+
+namespace art {
+
+// Offset of Method within the frame
+FrameOffset CallingConvention::MethodStackOffset() {
+  return displacement_;
+}
+
+// Managed runtime calling convention
+
+ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
+    bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) {
+  switch (instruction_set) {
+    case kArm:
+    case kThumb2:
+      return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    case kMips:
+      return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    case kX86:
+      return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      return NULL;
+  }
+}
+
+bool ManagedRuntimeCallingConvention::HasNext() {
+  return itr_args_ < NumArgs();
+}
+
+void ManagedRuntimeCallingConvention::Next() {
+  CHECK(HasNext());
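+  // Longs and doubles occupy two virtual register slots, so bump the slot iterator an extra time
+  // for them before advancing to the next argument.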
+  if (IsCurrentArgExplicit() &&  // don't query parameter type of implicit args
+      IsParamALongOrDouble(itr_args_)) {
+    itr_longs_and_doubles_++;
+    itr_slots_++;
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgExplicit() {
+  // Static methods have no implicit arguments; others implicitly pass this.
+  return IsStatic() || (itr_args_ != 0);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgPossiblyNull() {
+  return IsCurrentArgExplicit();  // any user parameter may be null
+}
+
+size_t ManagedRuntimeCallingConvention::CurrentParamSize() {
+  return ParamSize(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() {
+  return IsParamAReference(itr_args_);
+}
+
+// JNI calling convention
+
+JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
+                                                   const char* shorty,
+                                                   InstructionSet instruction_set) {
+  switch (instruction_set) {
+    case kArm:
+    case kThumb2:
+      return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty);
+    case kMips:
+      return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+    case kX86:
+      return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      return NULL;
+  }
+}
+
+size_t JniCallingConvention::ReferenceCount() const {
+  return NumReferenceArgs() + (IsStatic() ? 1 : 0);
+}
+
+FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
+  size_t start_of_sirt = SirtLinkOffset().Int32Value() + kPointerSize;
+  size_t references_size = kPointerSize * ReferenceCount();  // size excluding header
+  return FrameOffset(start_of_sirt + references_size);
+}
+
+FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
+  // Segment state is 4 bytes long
+  return FrameOffset(SavedLocalReferenceCookieOffset().Int32Value() + 4);
+}
+
+bool JniCallingConvention::HasNext() {
+  if (itr_args_ <= kObjectOrClass) {
+    return true;
+  } else {
+    unsigned int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return arg_pos < NumArgs();
+  }
+}
+
+void JniCallingConvention::Next() {
+  CHECK(HasNext());
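+  // Only real (post-JNIEnv*/jclass) arguments can be longs or doubles; the implicit ones are pointers.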
+  if (itr_args_ > kObjectOrClass) {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    if (IsParamALongOrDouble(arg_pos)) {
+      itr_longs_and_doubles_++;
+      itr_slots_++;
+    }
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool JniCallingConvention::IsCurrentParamAReference() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return true;   // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamAReference(arg_pos);
+    }
+  }
+}
+
+// Return position of SIRT entry holding reference at the current iterator
+// position
+FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
+  CHECK(IsCurrentParamAReference());
+  CHECK_GT(SirtLinkOffset(), SirtNumRefsOffset());
+  // Address of 1st SIRT entry
+  int result = SirtLinkOffset().Int32Value() + kPointerSize;
+  result += itr_refs_ * kPointerSize;
+  CHECK_GT(result, SirtLinkOffset().Int32Value());
+  return FrameOffset(result);
+}
+
+size_t JniCallingConvention::CurrentParamSize() {
+  if (itr_args_ <= kObjectOrClass) {
+    return kPointerSize;  // JNIEnv or jobject/jclass
+  } else {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return ParamSize(arg_pos);
+  }
+}
+
+size_t JniCallingConvention::NumberOfExtraArgumentsForJni() {
+  // The first argument is the JNIEnv*.
+  // Static methods have an extra argument which is the jclass.
+  return IsStatic() ? 2 : 1;
+}
+
+}  // namespace art
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
new file mode 100644
index 0000000..121d1f8
--- /dev/null
+++ b/compiler/jni/quick/calling_convention.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
+#define ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
+
+#include <vector>
+#include "oat/utils/managed_register.h"
+#include "stack_indirect_reference_table.h"
+#include "thread.h"
+
+namespace art {
+
+// Top-level abstraction for different calling conventions
+class CallingConvention {
+ public:
+  bool IsReturnAReference() const { return shorty_[0] == 'L'; }
+
+  Primitive::Type GetReturnType() const {
+    return Primitive::GetType(shorty_[0]);
+  }
+
+  size_t SizeOfReturnValue() const {
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[0]));
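+    // Widen sub-word (1- or 2-byte) results to a full 32-bit slot.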
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+
+  // Register that holds result of this method invocation.
+  virtual ManagedRegister ReturnRegister() = 0;
+  // Register reserved for scratch usage during procedure calls.
+  virtual ManagedRegister InterproceduralScratchRegister() = 0;
+
+  // Offset of Method within the frame
+  FrameOffset MethodStackOffset();
+
+  // Iterator interface
+
+  // Place iterator at start of arguments. The displacement is applied to
+  // frame offset methods to account for frames which may be on the stack
+  // below the one being iterated over.
+  void ResetIterator(FrameOffset displacement) {
+    displacement_ = displacement;
+    itr_slots_ = 0;
+    itr_args_ = 0;
+    itr_refs_ = 0;
+    itr_longs_and_doubles_ = 0;
+  }
+
+  virtual ~CallingConvention() {}
+
+ protected:
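+  // A "shorty" is the compact signature used by the runtime: shorty[0] is the
+  // return type and each following character is one declared parameter, with
+  // every reference type collapsed to 'L' (e.g. "ILJD" for int f(Object, long, double)).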
+  CallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : displacement_(0), is_static_(is_static), is_synchronized_(is_synchronized),
+        shorty_(shorty) {
+    num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
+    num_ref_args_ = is_static ? 0 : 1;  // The implicit this pointer.
+    num_long_or_double_args_ = 0;
+    for (size_t i = 1; i < strlen(shorty); i++) {
+      char ch = shorty_[i];
+      if (ch == 'L') {
+        num_ref_args_++;
+      } else if ((ch == 'D') || (ch == 'J')) {
+        num_long_or_double_args_++;
+      }
+    }
+  }
+
+  bool IsStatic() const {
+    return is_static_;
+  }
+  bool IsSynchronized() const {
+    return is_synchronized_;
+  }
+  bool IsParamALongOrDouble(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    char ch = shorty_[param];
+    return (ch == 'J' || ch == 'D');
+  }
+  bool IsParamAReference(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return true;  // this argument
+    }
+    return shorty_[param] == 'L';
+  }
+  size_t NumArgs() const {
+    return num_args_;
+  }
+  size_t NumLongOrDoubleArgs() const {
+    return num_long_or_double_args_;
+  }
+  size_t NumReferenceArgs() const {
+    return num_ref_args_;
+  }
+  size_t ParamSize(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return kPointerSize;  // this argument
+    }
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param]));
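+    // Widen sub-word (1- or 2-byte) parameters to a full 32-bit slot.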
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+  const char* GetShorty() const {
+    return shorty_.c_str();
+  }
+  // The slot number for the current calling convention argument.
+  // Note that each slot is 32 bits wide. When the current argument is wider
+  // than 32 bits, this is the first slot number for that argument.
+  unsigned int itr_slots_;
+  // The number of references iterated past
+  unsigned int itr_refs_;
+  // The index of the current argument along the argument list
+  unsigned int itr_args_;
+  // Number of longs and doubles seen along argument list
+  unsigned int itr_longs_and_doubles_;
+  // Space for frames below this on the stack
+  FrameOffset displacement_;
+
+ private:
+  const bool is_static_;
+  const bool is_synchronized_;
+  std::string shorty_;
+  size_t num_args_;
+  size_t num_ref_args_;
+  size_t num_long_or_double_args_;
+};
+
+// Abstraction for managed code's calling conventions
+// | { Incoming stack args } |
+// | { Prior Method* }       | <-- Prior SP
+// | { Return address }      |
+// | { Callee saves }        |
+// | { Spills ... }          |
+// | { Outgoing stack args } |
+// | { Method* }             | <-- SP
+class ManagedRuntimeCallingConvention : public CallingConvention {
+ public:
+  static ManagedRuntimeCallingConvention* Create(bool is_static, bool is_synchronized,
+                                                 const char* shorty,
+                                                 InstructionSet instruction_set);
+
+  // Register that holds the incoming method argument
+  virtual ManagedRegister MethodRegister() = 0;
+
+  // Iterator interface
+  bool HasNext();
+  void Next();
+  bool IsCurrentParamAReference();
+  bool IsCurrentArgExplicit();  // i.e. a non-implicit argument such as "this"
+  bool IsCurrentArgPossiblyNull();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  virtual ~ManagedRuntimeCallingConvention() {}
+
+  // Registers to spill to caller's out registers on entry.
+  virtual const std::vector<ManagedRegister>& EntrySpills() = 0;
+
+ protected:
+  ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : CallingConvention(is_static, is_synchronized, shorty) {}
+};
+
+// Abstraction for JNI calling conventions
+// | { Incoming stack args }         | <-- Prior SP
+// | { Return address }              |
+// | { Callee saves }                |     ([1])
+// | { Return value spill }          |     (live on return slow paths)
+// | { Local Ref. Table State }      |
+// | { Stack Indirect Ref. Table     |
+// |   num. refs./link }             |     (here to prior SP is frame size)
+// | { Method* }                     | <-- Anchor SP written to thread
+// | { Outgoing stack args }         | <-- SP at point of call
+// | Native frame                    |
+//
+// [1] We must save all callee saves here to enable any exception throws to restore
+// callee saves for frames above this one.
+class JniCallingConvention : public CallingConvention {
+ public:
+  static JniCallingConvention* Create(bool is_static, bool is_synchronized, const char* shorty,
+                                      InstructionSet instruction_set);
+
+  // Size of frame excluding space for outgoing args (it's assumed Method* is
+  // always at the bottom of a frame, but this doesn't work for outgoing
+  // native args). Includes alignment.
+  virtual size_t FrameSize() = 0;
+  // Size of outgoing arguments, including alignment
+  virtual size_t OutArgSize() = 0;
+  // Number of references in stack indirect reference table
+  size_t ReferenceCount() const;
+  // Location where the segment state of the local indirect reference table is saved
+  FrameOffset SavedLocalReferenceCookieOffset() const;
+  // Location where the return value of a call can be squirreled if another
+  // call is made following the native call
+  FrameOffset ReturnValueSaveLocation() const;
+  // Register that holds result if it is integer.
+  virtual ManagedRegister IntReturnRegister() = 0;
+
+  // Callee save registers to spill prior to native code (which may clobber)
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const = 0;
+
+  // Spill mask values
+  virtual uint32_t CoreSpillMask() const = 0;
+  virtual uint32_t FpSpillMask() const = 0;
+
+  // An extra scratch register live after the call
+  virtual ManagedRegister ReturnScratchRegister() const = 0;
+
+  // Iterator interface
+  bool HasNext();
+  virtual void Next();
+  bool IsCurrentParamAReference();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  // Iterator interface extension for JNI
+  FrameOffset CurrentParamSirtEntryOffset();
+
+  // Position of SIRT and interior fields
+  FrameOffset SirtOffset() const {
+    return FrameOffset(displacement_.Int32Value() +
+                       kPointerSize);  // above Method*
+  }
+  FrameOffset SirtNumRefsOffset() const {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::NumberOfReferencesOffset());
+  }
+  FrameOffset SirtLinkOffset() const {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::LinkOffset());
+  }
+
+  virtual ~JniCallingConvention() {}
+
+ protected:
+  // Named iterator positions
+  enum IteratorPos {
+    kJniEnv = 0,
+    kObjectOrClass = 1
+  };
+
+  explicit JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : CallingConvention(is_static, is_synchronized, shorty) {}
+
+  // Number of stack slots for outgoing arguments, above which the SIRT is
+  // located
+  virtual size_t NumberOfOutgoingStackArgs() = 0;
+
+ protected:
+  size_t NumberOfExtraArgumentsForJni();
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
new file mode 100644
index 0000000..fa227f7
--- /dev/null
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "calling_convention.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
+#include "disassembler.h"
+#include "jni_internal.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/utils/assembler.h"
+#include "oat/utils/managed_register.h"
+#include "oat/utils/arm/managed_register_arm.h"
+#include "oat/utils/mips/managed_register_mips.h"
+#include "oat/utils/x86/managed_register_x86.h"
+#include "thread.h"
+#include "UniquePtr.h"
+
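+// Shorthand so that '__ Foo(...)' below reads as 'jni_asm->Foo(...)'.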
+#define __ jni_asm->
+
+namespace art {
+
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size);
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg);
+
+// Generate the JNI bridge for the given method. General contract:
+// - Arguments are in the managed runtime format, either on stack or in
+//   registers; a reference to the method object is supplied as part of this
+//   convention.
+//
+CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
+                                            uint32_t access_flags, uint32_t method_idx,
+                                            const DexFile& dex_file) {
+  const bool is_native = (access_flags & kAccNative) != 0;
+  CHECK(is_native);
+  const bool is_static = (access_flags & kAccStatic) != 0;
+  const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
+  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+  InstructionSet instruction_set = compiler.GetInstructionSet();
+  if (instruction_set == kThumb2) {
+    instruction_set = kArm;
+  }
+  // Calling conventions used to iterate over parameters to method
+  UniquePtr<JniCallingConvention> main_jni_conv(
+      JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+  bool reference_return = main_jni_conv->IsReturnAReference();
+
+  UniquePtr<ManagedRuntimeCallingConvention> mr_conv(
+      ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+
+  // Calling conventions to call into JNI method "end" possibly passing a returned reference, the
+  //     method and the current thread.
+  size_t jni_end_arg_count = 0;
+  if (reference_return) { jni_end_arg_count++; }
+  if (is_synchronized) { jni_end_arg_count++; }
+  const char* jni_end_shorty = jni_end_arg_count == 0 ? "I"
+                                                        : (jni_end_arg_count == 1 ? "II" : "III");
+  UniquePtr<JniCallingConvention> end_jni_conv(
+      JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, instruction_set));
+
+  // Assembler that holds generated instructions
+  UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
+  bool should_disassemble = false;
+
+  // Offsets into data structures
+  // TODO: if cross compiling, these offsets are for the host, not the target.
+  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
+  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
+  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
+
+  // 1. Build the frame saving all callee saves
+  const size_t frame_size(main_jni_conv->FrameSize());
+  const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
+  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+
+  // 2. Set up the StackIndirectReferenceTable
+  mr_conv->ResetIterator(FrameOffset(frame_size));
+  main_jni_conv->ResetIterator(FrameOffset(0));
+  __ StoreImmediateToFrame(main_jni_conv->SirtNumRefsOffset(),
+                           main_jni_conv->ReferenceCount(),
+                           mr_conv->InterproceduralScratchRegister());
+  __ CopyRawPtrFromThread(main_jni_conv->SirtLinkOffset(),
+                          Thread::TopSirtOffset(),
+                          mr_conv->InterproceduralScratchRegister());
+  __ StoreStackOffsetToThread(Thread::TopSirtOffset(),
+                              main_jni_conv->SirtOffset(),
+                              mr_conv->InterproceduralScratchRegister());
+
+  // 3. Place incoming reference arguments into SIRT
+  main_jni_conv->Next();  // Skip JNIEnv*
+  // 3.5. Create Class argument for static methods out of passed method
+  if (is_static) {
+    FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+    // Check sirt offset is within frame
+    CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+    __ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
+               mr_conv->MethodRegister(), mirror::AbstractMethod::DeclaringClassOffset());
+    __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
+    __ StoreRef(sirt_offset, main_jni_conv->InterproceduralScratchRegister());
+    main_jni_conv->Next();  // in SIRT so move to next argument
+  }
+  while (mr_conv->HasNext()) {
+    CHECK(main_jni_conv->HasNext());
+    bool ref_param = main_jni_conv->IsCurrentParamAReference();
+    CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+    // References need placing in the SIRT; the SIRT entry value is what gets passed on.
+    if (ref_param) {
+      // Compute the SIRT entry; note that null is placed in the SIRT, but its
+      // boxed value must be NULL.
+      FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+      // Check SIRT offset is within frame and doesn't run into the saved segment state
+      CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+      CHECK_NE(sirt_offset.Uint32Value(),
+               main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
+      bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+      bool input_on_stack = mr_conv->IsCurrentParamOnStack();
+      CHECK(input_in_reg || input_on_stack);
+
+      if (input_in_reg) {
+        ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+        __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
+        __ StoreRef(sirt_offset, in_reg);
+      } else if (input_on_stack) {
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
+        __ CopyRef(sirt_offset, in_off,
+                   mr_conv->InterproceduralScratchRegister());
+      }
+    }
+    mr_conv->Next();
+    main_jni_conv->Next();
+  }
+
+  // 4. Write out the end of the quick frames.
+  __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset());
+  __ StoreImmediateToThread(Thread::TopOfManagedStackPcOffset(), 0,
+                            mr_conv->InterproceduralScratchRegister());
+
+  // 5. Move the frame down to allow space for outgoing args.
+  const size_t main_out_arg_size = main_jni_conv->OutArgSize();
+  const size_t end_out_arg_size = end_jni_conv->OutArgSize();
+  const size_t max_out_arg_size = std::max(main_out_arg_size, end_out_arg_size);
+  __ IncreaseFrameSize(max_out_arg_size);
+
+  // 6. Call into the appropriate JniMethodStart, passing Thread* so that the transition out of
+  //    Runnable can occur. The result is the saved JNI local state that is restored by the exit
+  //    call. We abuse the JNI calling convention here, as it is guaranteed to support passing
+  //    2 pointer arguments.
+  uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+                                        : ENTRYPOINT_OFFSET(pJniMethodStart);
+  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+  FrameOffset locked_object_sirt_offset(0);
+  if (is_synchronized) {
+    // Pass object for locking.
+    main_jni_conv->Next();  // Skip JNIEnv.
+    locked_object_sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+                         mr_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+    main_jni_conv->Next();
+  }
+  if (main_jni_conv->IsCurrentParamInRegister()) {
+    __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
+    __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start),
+            main_jni_conv->InterproceduralScratchRegister());
+  } else {
+    __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
+                        main_jni_conv->InterproceduralScratchRegister());
+    __ Call(ThreadOffset(jni_start), main_jni_conv->InterproceduralScratchRegister());
+  }
+  if (is_synchronized) {  // Check for exceptions from monitor enter.
+    __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
+  }
+  FrameOffset saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset();
+  __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4);
+
+  // 7. Iterate over arguments, placing values from the managed calling convention
+  //    into the convention required for a native call (shuffling). For references,
+  //    place an index/pointer to the reference after checking whether it is
+  //    NULL (which must be encoded as NULL).
+  //    Note: we do this prior to materializing the JNIEnv* and the static method's
+  //    jclass to give as many free registers for the shuffle as possible.
+  mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
+  uint32_t args_count = 0;
+  while (mr_conv->HasNext()) {
+    args_count++;
+    mr_conv->Next();
+  }
+
+  // Do a backward pass over arguments, so that the generated code will be "mov
+  // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
+  // TODO: A reverse iterator to improve readability.
+  for (uint32_t i = 0; i < args_count; ++i) {
+    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv*.
+    if (is_static) {
+      main_jni_conv->Next();  // Skip Class for now.
+    }
+    // Skip to the argument we're interested in.
+    for (uint32_t j = 0; j < args_count - i - 1; ++j) {
+      mr_conv->Next();
+      main_jni_conv->Next();
+    }
+    CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get(), frame_size, main_out_arg_size);
+  }
+  if (is_static) {
+    // Create argument for Class
+    mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv*
+    FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, sirt_offset,
+                         mr_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+  }
+
+  // 8. Create 1st argument, the JNI environment ptr.
+  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+  // Register that will hold local indirect reference table
+  if (main_jni_conv->IsCurrentParamInRegister()) {
+    ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
+    DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
+    __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
+  } else {
+    FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
+    __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(),
+                            main_jni_conv->InterproceduralScratchRegister());
+  }
+
+  // 9. Plant call to native code associated with method.
+  __ Call(main_jni_conv->MethodStackOffset(), mirror::AbstractMethod::NativeMethodOffset(),
+          mr_conv->InterproceduralScratchRegister());
+
+  // 10. Fix differences in result widths.
+  if (instruction_set == kX86) {
+    if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
+        main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
+      __ SignExtend(main_jni_conv->ReturnRegister(),
+                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
+    } else if (main_jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
+               main_jni_conv->GetReturnType() == Primitive::kPrimChar) {
+      __ ZeroExtend(main_jni_conv->ReturnRegister(),
+                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
+    }
+  }
+
+  // 11. Save return value
+  FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
+  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+        return_save_location.Uint32Value() % 8 != 0) {
+      // Ensure doubles are 8-byte aligned for MIPS
+      return_save_location = FrameOffset(return_save_location.Uint32Value() + kPointerSize);
+    }
+    CHECK_LT(return_save_location.Uint32Value(), frame_size+main_out_arg_size);
+    __ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
+  }
+
+  // 12. Call into JNI method end possibly passing a returned reference, the method and the current
+  //     thread.
+  end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
+  uintptr_t jni_end;
+  if (reference_return) {
+    // Pass result.
+    jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
+                              : ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
+    SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
+    end_jni_conv->Next();
+  } else {
+    jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
+                              : ENTRYPOINT_OFFSET(pJniMethodEnd);
+  }
+  // Pass saved local reference state.
+  if (end_jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+    __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4);
+  } else {
+    ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+    __ Load(out_reg, saved_cookie_offset, 4);
+  }
+  end_jni_conv->Next();
+  if (is_synchronized) {
+    // Pass object for unlocking.
+    if (end_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+                         end_jni_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+    end_jni_conv->Next();
+  }
+  if (end_jni_conv->IsCurrentParamInRegister()) {
+    __ GetCurrentThread(end_jni_conv->CurrentParamRegister());
+    __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end),
+            end_jni_conv->InterproceduralScratchRegister());
+  } else {
+    __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
+                        end_jni_conv->InterproceduralScratchRegister());
+    __ Call(ThreadOffset(jni_end), end_jni_conv->InterproceduralScratchRegister());
+  }
+
+  // 13. Reload return value
+  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
+  }
+
+  // 14. Move the frame up now that we're done with the out arg space.
+  __ DecreaseFrameSize(max_out_arg_size);
+
+  // 15. Process pending exceptions from JNI call or monitor exit.
+  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0);
+
+  // 16. Remove activation - no need to restore callee save registers because we didn't clobber
+  //     them.
+  __ RemoveFrame(frame_size, std::vector<ManagedRegister>());
+
+  // 17. Finalize code generation
+  __ EmitSlowPaths();
+  size_t cs = __ CodeSize();
+  std::vector<uint8_t> managed_code(cs);
+  MemoryRegion code(&managed_code[0], managed_code.size());
+  __ FinalizeInstructions(code);
+  if (should_disassemble) {
+    UniquePtr<Disassembler> disassembler(Disassembler::Create(instruction_set));
+    disassembler->Dump(LOG(INFO), &managed_code[0], &managed_code[managed_code.size()]);
+  }
+  return new CompiledMethod(instruction_set,
+                            managed_code,
+                            frame_size,
+                            main_jni_conv->CoreSpillMask(),
+                            main_jni_conv->FpSpillMask());
+}
+
+// Copy a single parameter from the managed to the JNI calling convention
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size) {
+  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
+  FrameOffset sirt_offset(0);
+  bool null_allowed = false;
+  bool ref_param = jni_conv->IsCurrentParamAReference();
+  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+  // input may be in register, on stack or both - but not none!
+  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
+  if (output_in_reg) {  // output shouldn't straddle registers and stack
+    CHECK(!jni_conv->IsCurrentParamOnStack());
+  } else {
+    CHECK(jni_conv->IsCurrentParamOnStack());
+  }
+  // References need placing in the SIRT; the entry address is what gets passed on.
+  if (ref_param) {
+    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
+    // Compute SIRT offset. Note null is placed in the SIRT but the jobject
+    // passed to the native code must be null (not a pointer into the SIRT
+    // as with regular references).
+    sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    // Check SIRT offset is within frame.
+    CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+  }
+  if (input_in_reg && output_in_reg) {
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+    } else {
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling move
+        __ Move(out_reg, in_reg, mr_conv->CurrentParamSize());
+      } else {
+        UNIMPLEMENTED(FATAL);  // we currently don't expect to see this case
+      }
+    }
+  } else if (!input_in_reg && !output_in_reg) {
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    if (ref_param) {
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
+    }
+  } else if (!input_in_reg && output_in_reg) {
+    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    // Check that incoming stack arguments are above the current stack frame.
+    CHECK_GT(in_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Load(out_reg, in_off, param_size);
+    }
+  } else {
+    CHECK(input_in_reg && !output_in_reg);
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    // Check outgoing argument is within frame
+    CHECK_LT(out_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      // TODO: recycle value in in_reg rather than reload from SIRT
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling store
+        __ Store(out_off, in_reg, param_size);
+      } else {
+        // store where input straddles registers and stack
+        CHECK_EQ(param_size, 8u);
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
+      }
+    }
+  }
+}
+
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg) {
+  if (jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset dest = jni_conv->CurrentParamStackOffset();
+    __ StoreRawPtr(dest, in_reg);
+  } else {
+    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
+      __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize());
+    }
+  }
+}
+
+}  // namespace art
+
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler,
+                                                         uint32_t access_flags, uint32_t method_idx,
+                                                         const art::DexFile& dex_file) {
+  return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
+}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
new file mode 100644
index 0000000..053ab44
--- /dev/null
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips.h"
+
+#include "base/logging.h"
+#include "oat/utils/mips/managed_register_mips.h"
+
+namespace art {
+namespace mips {
+
+// Calling convention
+ManagedRegister MipsManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return MipsManagedRegister::FromCoreRegister(T9);
+}
+
+ManagedRegister MipsJniCallingConvention::InterproceduralScratchRegister() {
+  return MipsManagedRegister::FromCoreRegister(T9);
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F') {
+    return MipsManagedRegister::FromFRegister(F0);
+  } else if (shorty[0] == 'D') {
+    return MipsManagedRegister::FromDRegister(D0);
+  } else if (shorty[0] == 'J') {
+    return MipsManagedRegister::FromRegisterPair(V0_V1);
+  } else if (shorty[0] == 'V') {
+    return MipsManagedRegister::NoRegister();
+  } else {
+    return MipsManagedRegister::FromCoreRegister(V0);
+  }
+}
+
+ManagedRegister MipsManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister MipsJniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister MipsJniCallingConvention::IntReturnRegister() {
+  return MipsManagedRegister::FromCoreRegister(V0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister MipsManagedRuntimeCallingConvention::MethodRegister() {
+  return MipsManagedRegister::FromCoreRegister(A0);
+}
+
+bool MipsManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything moved to stack on entry.
+}
+
+bool MipsManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;
+}
+
+ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +   // displacement
+                  kPointerSize +                 // Method*
+                  (itr_slots_ * kPointerSize));  // offset into in args
+  return result;
+}
+
+const std::vector<ManagedRegister>& MipsManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on MIPS to free them up for scratch use; we then assume
+  // all arguments are on the stack.
+  if (entry_spills_.size() == 0) {
+    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    if (num_spills > 0) {
+      entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A1));
+      if (num_spills > 1) {
+        entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A2));
+        if (num_spills > 2) {
+          entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A3));
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+// JNI calling convention
+
+MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
+  // or jclass for static methods and the JNIEnv. We start at the aligned register A2.
+  size_t padding = 0;
+  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+    if (IsParamALongOrDouble(cur_arg)) {
+      if ((cur_reg & 1) != 0) {
+        padding += 4;
+        cur_reg++;  // additional bump to ensure alignment
+      }
+      cur_reg++;  // additional bump to skip extra long word
+    }
+    cur_reg++;  // bump the iterator for every argument
+  }
+  padding_ = padding;
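+  // For example (illustrative), a virtual method with shorty "VIJ" uses A0/A1 for
+  // JNIEnv*/this and A2 for the int, so the long needs 4 bytes of padding to land
+  // on an even (8-byte aligned) slot.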
+
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T0));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T1));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T2));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T3));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T4));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T5));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T6));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T7));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T8));
+}
+
+uint32_t MipsJniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor
+  uint32_t result = 0;
+  result = 1 << T0 | 1 << T1 | 1 << T2 | 1 << T3 | 1 << T4 | 1 << T5 | 1 << T6 |
+           1 << T7 | 1 << T8 | 1 << RA;
+  return result;
+}
+
+ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
+  return MipsManagedRegister::FromCoreRegister(AT);
+}
+
+size_t MipsJniCallingConvention::FrameSize() {
+  // Method*, RA and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t MipsJniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize + padding_,
+                 kStackAlignment);
+}
+
+// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
+// in even register numbers and stack slots
+void MipsJniCallingConvention::Next() {
+  JniCallingConvention::Next();
+  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) &&
+      (arg_pos < NumArgs()) &&
+      IsParamALongOrDouble(arg_pos)) {
+    // itr_slots_ needs to be an even number, according to AAPCS.
+    if ((itr_slots_ & 0x1u) != 0) {
+      itr_slots_++;
+    }
+  }
+}
+
+bool MipsJniCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 4;
+}
+
+bool MipsJniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+static const Register kJniArgumentRegisters[] = {
+  A0, A1, A2, A3
+};
+ManagedRegister MipsJniCallingConvention::CurrentParamRegister() {
+  CHECK_LT(itr_slots_, 4u);
+  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+    CHECK_EQ(itr_slots_, 2u);
+    return MipsManagedRegister::FromRegisterPair(A2_A3);
+  } else {
+    return MipsManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+  }
+}
+
+FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() {
+  CHECK_GE(itr_slots_, 4u);
+  size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kPointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv*
+  return static_args + param_args + 1;
+}
+}  // namespace mips
+}  // namespace art
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
new file mode 100644
index 0000000..9068136
--- /dev/null
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_MIPS_CALLING_CONVENTION_MIPS_H_
+#define ART_SRC_OAT_JNI_MIPS_CALLING_CONVENTION_MIPS_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips {
+class MipsManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~MipsManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+
+ private:
+  std::vector<ManagedRegister> entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
+};
+
+class MipsJniCallingConvention : public JniCallingConvention {
+ public:
+  explicit MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~MipsJniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister IntReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual void Next();  // Override default behavior for AAPCS
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split in AAPCS
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
+};
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_MIPS_CALLING_CONVENTION_MIPS_H_
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
new file mode 100644
index 0000000..b671bd1
--- /dev/null
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86.h"
+
+#include "base/logging.h"
+#include "oat/utils/x86/managed_register_x86.h"
+#include "utils.h"
+
+namespace art {
+namespace x86 {
+
+// Calling convention
+
+ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
+  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    if (jni) {
+      return X86ManagedRegister::FromX87Register(ST0);
+    } else {
+      return X86ManagedRegister::FromXmmRegister(XMM0);
+    }
+  } else if (shorty[0] == 'J') {
+    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  } else if (shorty[0] == 'V') {
+    return ManagedRegister::NoRegister();
+  } else {
+    return X86ManagedRegister::FromCpuRegister(EAX);
+  }
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+ManagedRegister X86JniCallingConvention::IntReturnRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;  // Everything is passed by stack
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +   // displacement
+                     kPointerSize +                 // Method*
+                     (itr_slots_ * kPointerSize));  // offset into in args
+}
+
+const std::vector<ManagedRegister>& X86ManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on X86 to free them up for scratch use; we then assume
+  // all arguments are on the stack.
+  if (entry_spills_.size() == 0) {
+    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    if (num_spills > 0) {
+      entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(ECX));
+      if (num_spills > 1) {
+        entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(EDX));
+        if (num_spills > 2) {
+          entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(EBX));
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+
+// JNI calling convention
+
+X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(EBP));
+  callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(ESI));
+  callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(EDI));
+}
+
+uint32_t X86JniCallingConvention::CoreSpillMask() const {
+  return 1 << EBP | 1 << ESI | 1 << EDI | 1 << kNumberOfCpuRegisters;
+}
+
+size_t X86JniCallingConvention::FrameSize() {
+  // Method*, return address and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t X86JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
+}
+
+bool X86JniCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack.
+}
+
+bool X86JniCallingConvention::IsCurrentParamOnStack() {
+  return true;  // Everything is passed by stack.
+}
+
+ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kPointerSize));
+}
+
+size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv* and return pc (pushed after Method*)
+  size_t total_args = static_args + param_args + 2;
+  return total_args;
+
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
new file mode 100644
index 0000000..ea8a26e
--- /dev/null
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
+#define ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86 {
+
+class X86ManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  explicit X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
+                                              const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~X86ManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+ private:
+  std::vector<ManagedRegister> entry_spills_;
+  DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
+};
+
+class X86JniCallingConvention : public JniCallingConvention {
+ public:
+  explicit X86JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~X86JniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister IntReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;
+  }
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
diff --git a/compiler/llvm/art_module.ll b/compiler/llvm/art_module.ll
new file mode 100644
index 0000000..233692c
--- /dev/null
+++ b/compiler/llvm/art_module.ll
@@ -0,0 +1,153 @@
+;;
+;; Copyright (C) 2012 The Android Open Source Project
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;;      http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Type
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%JavaObject = type opaque
+
+%ShadowFrame = type { i32                  ; Number of VRegs
+                    , %ShadowFrame*        ; Previous frame
+                    , %JavaObject*         ; Method object pointer
+                    , i32                  ; Line number for stack backtrace
+                    ; [0 x i32]            ; VRegs
+                    }
+
+declare void @__art_type_list(%JavaObject*, %ShadowFrame*)
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Thread
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare %JavaObject* @art_portable_get_current_thread_from_code()
+declare %JavaObject* @art_portable_set_current_thread_from_code(%JavaObject*)
+
+declare void @art_portable_lock_object_from_code(%JavaObject*, %JavaObject*)
+declare void @art_portable_unlock_object_from_code(%JavaObject*, %JavaObject*)
+
+declare void @art_portable_test_suspend_from_code(%JavaObject*)
+
+declare %ShadowFrame* @art_portable_push_shadow_frame_from_code(%JavaObject*, %ShadowFrame*, %JavaObject*, i32)
+declare void @art_portable_pop_shadow_frame_from_code(%ShadowFrame*)
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Exception
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare %JavaObject* @art_portable_get_and_clear_exception(%JavaObject*)
+declare void @art_portable_throw_div_zero_from_code()
+declare void @art_portable_throw_array_bounds_from_code(i32, i32)
+declare void @art_portable_throw_no_such_method_from_code(i32)
+declare void @art_portable_throw_null_pointer_exception_from_code(i32)
+declare void @art_portable_throw_stack_overflow_from_code()
+declare void @art_portable_throw_exception_from_code(%JavaObject*)
+
+declare i32 @art_portable_find_catch_block_from_code(%JavaObject*, i32)
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Object Space
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare %JavaObject* @art_portable_alloc_object_from_code(i32, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_alloc_object_from_code_with_access_check(i32, %JavaObject*, %JavaObject*)
+
+declare %JavaObject* @art_portable_alloc_array_from_code(i32, %JavaObject*, i32, %JavaObject*)
+declare %JavaObject* @art_portable_alloc_array_from_code_with_access_check(i32, %JavaObject*, i32, %JavaObject*)
+declare %JavaObject* @art_portable_check_and_alloc_array_from_code(i32, %JavaObject*, i32, %JavaObject*)
+declare %JavaObject* @art_portable_check_and_alloc_array_from_code_with_access_check(i32, %JavaObject*, i32, %JavaObject*)
+
+declare void @art_portable_find_instance_field_from_code(i32, %JavaObject*)
+declare void @art_portable_find_static_field_from_code(i32, %JavaObject*)
+
+declare %JavaObject* @art_portable_find_static_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_find_direct_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_find_virtual_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_find_super_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_find_interface_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_find_interface_method_from_code(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+
+declare %JavaObject* @art_portable_initialize_static_storage_from_code(i32, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_initialize_type_from_code(i32, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_initialize_type_and_verify_access_from_code(i32, %JavaObject*, %JavaObject*)
+
+declare %JavaObject* @art_portable_resolve_string_from_code(%JavaObject*, i32)
+
+declare i32 @art_portable_set32_static_from_code(i32, %JavaObject*, i32)
+declare i32 @art_portable_set64_static_from_code(i32, %JavaObject*, i64)
+declare i32 @art_portable_set_obj_static_from_code(i32, %JavaObject*, %JavaObject*)
+
+declare i32 @art_portable_get32_static_from_code(i32, %JavaObject*)
+declare i64 @art_portable_get64_static_from_code(i32, %JavaObject*)
+declare %JavaObject* @art_portable_get_obj_static_from_code(i32, %JavaObject*)
+
+declare i32 @art_portable_set32_instance_from_code(i32, %JavaObject*, %JavaObject*, i32)
+declare i32 @art_portable_set64_instance_from_code(i32, %JavaObject*, %JavaObject*, i64)
+declare i32 @art_portable_set_obj_instance_from_code(i32, %JavaObject*, %JavaObject*, %JavaObject*)
+
+declare i32 @art_portable_get32_instance_from_code(i32, %JavaObject*, %JavaObject*)
+declare i64 @art_portable_get64_instance_from_code(i32, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_get_obj_instance_from_code(i32, %JavaObject*, %JavaObject*)
+
+declare %JavaObject* @art_portable_decode_jobject_in_thread(%JavaObject*, %JavaObject*)
+
+declare void @art_portable_fill_array_data_from_code(%JavaObject*, i32, %JavaObject*, i32)
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Type checking, as used for casts and array stores
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare i32 @art_portable_is_assignable_from_code(%JavaObject*, %JavaObject*)
+declare void @art_portable_check_cast_from_code(%JavaObject*, %JavaObject*)
+declare void @art_portable_check_put_array_element_from_code(%JavaObject*, %JavaObject*)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Math
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare i64 @art_d2l(double)
+declare i32 @art_d2i(double)
+declare i64 @art_f2l(float)
+declare i32 @art_f2i(float)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; JNI
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare i32 @art_portable_jni_method_start(%JavaObject*)
+declare i32 @art_portable_jni_method_start_synchronized(%JavaObject*, %JavaObject*)
+
+declare void @art_portable_jni_method_end(i32, %JavaObject*)
+declare void @art_portable_jni_method_end_synchronized(i32, %JavaObject*, %JavaObject*)
+declare %JavaObject* @art_portable_jni_method_end_with_reference(%JavaObject*, i32, %JavaObject*)
+declare %JavaObject* @art_portable_jni_method_end_with_reference_synchronized(%JavaObject*, i32, %JavaObject*, %JavaObject*)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Temporary runtime support, will be removed in the future
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare i1 @art_portable_is_exception_pending_from_code()
+
+declare void @art_portable_mark_gc_card_from_code(%JavaObject*, %JavaObject*)
+
+declare void @art_portable_proxy_invoke_handler_from_code(%JavaObject*, ...)
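
As a reading aid for the %ShadowFrame type declared near the top of this file, here is a rough C++ mirror of the same layout; the struct and field names are illustrative only, and the authoritative class lives in the ART runtime sources:

    #include <cstdint>

    struct ShadowFrameSketch {
      uint32_t number_of_vregs;   // Number of VRegs
      ShadowFrameSketch* link;    // Previous frame
      void* method;               // Method object pointer
      uint32_t dex_pc;            // Line number for stack backtrace
      // uint32_t vregs[];        // VRegs (commented out in the IR type as well)
    };
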
diff --git a/compiler/llvm/backend_options.h b/compiler/llvm/backend_options.h
new file mode 100644
index 0000000..924a346
--- /dev/null
+++ b/compiler/llvm/backend_options.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_BACKEND_OPTIONS_H_
+#define ART_SRC_COMPILER_LLVM_BACKEND_OPTIONS_H_
+
+#include <llvm/Support/CommandLine.h>
+
+#define DECLARE_ARM_BACKEND_OPTIONS \
+extern llvm::cl::opt<bool> EnableARMLongCalls; \
+extern llvm::cl::opt<bool> ReserveR9;
+
+#define INITIAL_ARM_BACKEND_OPTIONS \
+EnableARMLongCalls = true; \
+ReserveR9 = true;
+
+#define DECLARE_X86_BACKEND_OPTIONS
+#define INITIAL_X86_BACKEND_OPTIONS
+
+#define DECLARE_Mips_BACKEND_OPTIONS
+#define INITIAL_Mips_BACKEND_OPTIONS
+
+#define LLVM_TARGET(TargetName) DECLARE_##TargetName##_BACKEND_OPTIONS
+#include "llvm/Config/Targets.def"
+
+namespace art {
+namespace llvm {
+
+inline void InitialBackendOptions() {
+#define LLVM_TARGET(TargetName) INITIAL_##TargetName##_BACKEND_OPTIONS
+#include "llvm/Config/Targets.def"
+}
+
+}  // namespace llvm
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_LLVM_BACKEND_OPTIONS_H_
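
The DECLARE_*/INITIAL_* pairs above are stamped out through LLVM's Targets.def X-macro: each target configured into LLVM expands LLVM_TARGET(TargetName) once. For the ARM target the two includes expand to roughly the following; this is a manual expansion for illustration, not additional code:

    // From the first include, via DECLARE_ARM_BACKEND_OPTIONS:
    extern llvm::cl::opt<bool> EnableARMLongCalls;
    extern llvm::cl::opt<bool> ReserveR9;

    // Inside InitialBackendOptions(), via INITIAL_ARM_BACKEND_OPTIONS:
    EnableARMLongCalls = true;
    ReserveR9 = true;

    // X86 and Mips expand to nothing, since their DECLARE_/INITIAL_ macros are empty.
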
diff --git a/compiler/llvm/backend_types.h b/compiler/llvm/backend_types.h
new file mode 100644
index 0000000..c89504a
--- /dev/null
+++ b/compiler/llvm/backend_types.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_BACKEND_TYPES_H_
+#define ART_SRC_COMPILER_LLVM_BACKEND_TYPES_H_
+
+#include "base/logging.h"
+
+
+namespace art {
+namespace llvm {
+
+
+enum JType {
+  kVoid,
+  kBoolean,
+  kByte,
+  kChar,
+  kShort,
+  kInt,
+  kLong,
+  kFloat,
+  kDouble,
+  kObject,
+  MAX_JTYPE
+};
+
+enum TBAASpecialType {
+  kTBAARegister,
+  kTBAAStackTemp,
+  kTBAAHeapArray,
+  kTBAAHeapInstance,
+  kTBAAHeapStatic,
+  kTBAAJRuntime,
+  kTBAARuntimeInfo,
+  kTBAAShadowFrame,
+  kTBAAConstJObject,
+  MAX_TBAA_SPECIAL_TYPE
+};
+
+
+enum ExpectCond {
+  kLikely,
+  kUnlikely,
+  MAX_EXPECT
+};
+
+
+inline JType GetJTypeFromShorty(char shorty_jty) {
+  switch (shorty_jty) {
+  case 'V':
+    return kVoid;
+
+  case 'Z':
+    return kBoolean;
+
+  case 'B':
+    return kByte;
+
+  case 'C':
+    return kChar;
+
+  case 'S':
+    return kShort;
+
+  case 'I':
+    return kInt;
+
+  case 'J':
+    return kLong;
+
+  case 'F':
+    return kFloat;
+
+  case 'D':
+    return kDouble;
+
+  case 'L':
+    return kObject;
+
+  default:
+    LOG(FATAL) << "Unknown Dalvik shorty descriptor: " << shorty_jty;
+    return kVoid;
+  }
+}
+
+} // namespace llvm
+} // namespace art
+
+
+#endif // ART_SRC_COMPILER_LLVM_BACKEND_TYPES_H_
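
A short usage sketch of the shorty-to-JType mapping above; ShortyMappingSketch() is an illustrative name, and the checks simply restate the switch cases:

    #include "backend_types.h"

    void ShortyMappingSketch() {
      using namespace art::llvm;
      CHECK(GetJTypeFromShorty('J') == kLong);    // wide primitive
      CHECK(GetJTypeFromShorty('L') == kObject);  // reference type
      CHECK(GetJTypeFromShorty('V') == kVoid);    // void return
      // Any other character hits the LOG(FATAL) default case.
    }
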
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
new file mode 100644
index 0000000..afca223
--- /dev/null
+++ b/compiler/llvm/compiler_llvm.cc
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_llvm.h"
+
+#include "backend_options.h"
+#include "base/stl_util.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "globals.h"
+#include "ir_builder.h"
+#include "jni/portable/jni_compiler.h"
+#include "llvm_compilation_unit.h"
+#include "oat_file.h"
+#include "utils_llvm.h"
+#include "verifier/method_verifier.h"
+
+#include <llvm/LinkAllPasses.h>
+#include <llvm/Support/ManagedStatic.h>
+#include <llvm/Support/TargetSelect.h>
+#include <llvm/Support/Threading.h>
+
+namespace art {
+void CompileOneMethod(CompilerDriver& driver,
+                      const CompilerBackend compilerBackend,
+                      const DexFile::CodeItem* code_item,
+                      uint32_t access_flags, InvokeType invoke_type,
+                      uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                      const DexFile& dex_file,
+                      llvm::LlvmCompilationUnit* llvm_info);
+}
+
+namespace llvm {
+  extern bool TimePassesIsEnabled;
+}
+
+namespace {
+
+pthread_once_t llvm_initialized = PTHREAD_ONCE_INIT;
+
+void InitializeLLVM() {
+  // Initialize LLVM internal data structure for multithreading
+  llvm::llvm_start_multithreaded();
+
+  // NOTE: Uncomment the following line to report the time consumed by each LLVM pass.
+  //llvm::TimePassesIsEnabled = true;
+
+  // Initialize LLVM target-specific options.
+  art::llvm::InitialBackendOptions();
+
+  // Initialize LLVM target, MC subsystem, asm printer, and asm parser.
+  if (art::kIsTargetBuild) {
+    // Don't initialize all targets on device. Just initialize the device's native target
+    llvm::InitializeNativeTarget();
+    llvm::InitializeNativeTargetAsmPrinter();
+    llvm::InitializeNativeTargetAsmParser();
+  } else {
+    llvm::InitializeAllTargets();
+    llvm::InitializeAllTargetMCs();
+    llvm::InitializeAllAsmPrinters();
+    llvm::InitializeAllAsmParsers();
+  }
+
+  // Initialize LLVM optimization passes
+  llvm::PassRegistry &registry = *llvm::PassRegistry::getPassRegistry();
+
+  llvm::initializeCore(registry);
+  llvm::initializeScalarOpts(registry);
+  llvm::initializeIPO(registry);
+  llvm::initializeAnalysis(registry);
+  llvm::initializeIPA(registry);
+  llvm::initializeTransformUtils(registry);
+  llvm::initializeInstCombine(registry);
+  llvm::initializeInstrumentation(registry);
+  llvm::initializeTarget(registry);
+}
+
+// The guard to shut down LLVM.
+// llvm::llvm_shutdown_obj llvm_guard;
+// TODO: The line above is commented out because it causes SIGSEGV from time to
+// time, for one of two reasons: (1) the destruction order of static objects, or
+// (2) dlopen/dlclose side effects on static objects.
+
+} // anonymous namespace
+
+
+namespace art {
+namespace llvm {
+
+
+::llvm::Module* makeLLVMModuleContents(::llvm::Module* module);
+
+
+CompilerLLVM::CompilerLLVM(CompilerDriver* driver, InstructionSet insn_set)
+    : compiler_driver_(driver), insn_set_(insn_set),
+      next_cunit_id_lock_("compilation unit id lock"), next_cunit_id_(1) {
+
+  // Initialize LLVM libraries
+  pthread_once(&llvm_initialized, InitializeLLVM);
+}
+
+
+CompilerLLVM::~CompilerLLVM() {
+}
+
+
+LlvmCompilationUnit* CompilerLLVM::AllocateCompilationUnit() {
+  MutexLock GUARD(Thread::Current(), next_cunit_id_lock_);
+  LlvmCompilationUnit* cunit = new LlvmCompilationUnit(this, next_cunit_id_++);
+  if (!bitcode_filename_.empty()) {
+    cunit->SetBitcodeFileName(StringPrintf("%s-%zu",
+                                           bitcode_filename_.c_str(),
+                                           cunit->GetCompilationUnitId()));
+  }
+  return cunit;
+}
+
+
+CompiledMethod* CompilerLLVM::
+CompileDexMethod(DexCompilationUnit* dex_compilation_unit, InvokeType invoke_type) {
+  UniquePtr<LlvmCompilationUnit> cunit(AllocateCompilationUnit());
+
+  cunit->SetDexCompilationUnit(dex_compilation_unit);
+  cunit->SetCompilerDriver(compiler_driver_);
+  // TODO: consolidate ArtCompileMethods
+  CompileOneMethod(*compiler_driver_,
+                   kPortable,
+                   dex_compilation_unit->GetCodeItem(),
+                   dex_compilation_unit->GetAccessFlags(),
+                   invoke_type,
+                   dex_compilation_unit->GetClassDefIndex(),
+                   dex_compilation_unit->GetDexMethodIndex(),
+                   dex_compilation_unit->GetClassLoader(),
+                   *dex_compilation_unit->GetDexFile(),
+                   cunit.get());
+
+  cunit->Materialize();
+
+  MethodReference mref(dex_compilation_unit->GetDexFile(),
+                       dex_compilation_unit->GetDexMethodIndex());
+  return new CompiledMethod(compiler_driver_->GetInstructionSet(),
+                            cunit->GetElfObject(),
+                            *verifier::MethodVerifier::GetDexGcMap(mref),
+                            cunit->GetDexCompilationUnit()->GetSymbol());
+}
+
+
+CompiledMethod* CompilerLLVM::
+CompileNativeMethod(DexCompilationUnit* dex_compilation_unit) {
+  UniquePtr<LlvmCompilationUnit> cunit(AllocateCompilationUnit());
+
+  UniquePtr<JniCompiler> jni_compiler(
+      new JniCompiler(cunit.get(), *compiler_driver_, dex_compilation_unit));
+
+  return jni_compiler->Compile();
+}
+
+
+} // namespace llvm
+} // namespace art
+
+inline static art::llvm::CompilerLLVM* ContextOf(art::CompilerDriver& driver) {
+  void *compiler_context = driver.GetCompilerContext();
+  CHECK(compiler_context != NULL);
+  return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
+}
+
+inline static const art::llvm::CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
+  void *compiler_context = driver.GetCompilerContext();
+  CHECK(compiler_context != NULL);
+  return reinterpret_cast<const art::llvm::CompilerLLVM*>(compiler_context);
+}
+
+extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver) {
+  CHECK(driver.GetCompilerContext() == NULL);
+
+  art::llvm::CompilerLLVM* compiler_llvm = new art::llvm::CompilerLLVM(&driver,
+                                                                       driver.GetInstructionSet());
+
+  driver.SetCompilerContext(compiler_llvm);
+}
+
+extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver) {
+  delete ContextOf(driver);
+  driver.SetCompilerContext(NULL);
+}
+
+extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
+                                                 const art::DexFile::CodeItem* code_item,
+                                                 uint32_t access_flags,
+                                                 art::InvokeType invoke_type,
+                                                 uint32_t class_def_idx,
+                                                 uint32_t method_idx,
+                                                 jobject class_loader,
+                                                 const art::DexFile& dex_file) {
+  UNUSED(class_def_idx);  // TODO: this is used with Compiler::RequiresConstructorBarrier.
+  art::ClassLinker *class_linker = art::Runtime::Current()->GetClassLinker();
+
+  art::DexCompilationUnit dex_compilation_unit(
+    NULL, class_loader, class_linker, dex_file, code_item,
+    class_def_idx, method_idx, access_flags);
+  art::llvm::CompilerLLVM* compiler_llvm = ContextOf(driver);
+  art::CompiledMethod* result = compiler_llvm->CompileDexMethod(&dex_compilation_unit, invoke_type);
+  return result;
+}
+
+extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver,
+                                                        uint32_t access_flags, uint32_t method_idx,
+                                                        const art::DexFile& dex_file) {
+  art::ClassLinker *class_linker = art::Runtime::Current()->GetClassLinker();
+
+  art::DexCompilationUnit dex_compilation_unit(
+    NULL, NULL, class_linker, dex_file, NULL,
+    0, method_idx, access_flags);
+
+  art::llvm::CompilerLLVM* compiler_llvm = ContextOf(driver);
+  art::CompiledMethod* result = compiler_llvm->CompileNativeMethod(&dex_compilation_unit);
+  return result;
+}
+
+extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
+                                               std::string const& filename) {
+  ContextOf(driver)->SetBitcodeFileName(filename);
+}
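
For orientation, a minimal sketch of how the extern "C" glue above is driven; the variables (driver, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, dex_file) are placeholders for values the compiler driver already holds, and the bitcode path is an arbitrary example:

    // Install a CompilerLLVM instance as the driver's compiler context ...
    ArtInitCompilerContext(driver);
    compilerLLVMSetBitcodeFileName(driver, "/tmp/art-bitcode");  // optional dump prefix

    // ... compile each method through the LLVM backend ...
    art::CompiledMethod* compiled =
        ArtCompileMethod(driver, code_item, access_flags, invoke_type,
                         class_def_idx, method_idx, class_loader, dex_file);

    // ... and tear the context down when compilation finishes.
    ArtUnInitCompilerContext(driver);
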
diff --git a/compiler/llvm/compiler_llvm.h b/compiler/llvm/compiler_llvm.h
new file mode 100644
index 0000000..b70ddc5
--- /dev/null
+++ b/compiler/llvm/compiler_llvm.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_COMPILER_LLVM_H_
+#define ART_SRC_COMPILER_LLVM_COMPILER_LLVM_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "driver/compiler_driver.h"
+#include "instruction_set.h"
+#include "mirror/object.h"
+
+#include <UniquePtr.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace art {
+  class CompiledMethod;
+  class CompilerDriver;
+  class DexCompilationUnit;
+  namespace mirror {
+    class AbstractMethod;
+    class ClassLoader;
+  }  // namespace mirror
+}  // namespace art
+
+
+namespace llvm {
+  class Function;
+  class LLVMContext;
+  class Module;
+  class PointerType;
+  class StructType;
+  class Type;
+}  // namespace llvm
+
+
+namespace art {
+namespace llvm {
+
+class LlvmCompilationUnit;
+class IRBuilder;
+
+class CompilerLLVM {
+ public:
+  CompilerLLVM(CompilerDriver* driver, InstructionSet insn_set);
+
+  ~CompilerLLVM();
+
+  CompilerDriver* GetCompiler() const {
+    return compiler_driver_;
+  }
+
+  InstructionSet GetInstructionSet() const {
+    return insn_set_;
+  }
+
+  void SetBitcodeFileName(std::string const& filename) {
+    bitcode_filename_ = filename;
+  }
+
+  CompiledMethod* CompileDexMethod(DexCompilationUnit* dex_compilation_unit,
+                                   InvokeType invoke_type);
+
+  CompiledMethod* CompileGBCMethod(DexCompilationUnit* dex_compilation_unit, std::string* func);
+
+  CompiledMethod* CompileNativeMethod(DexCompilationUnit* dex_compilation_unit);
+
+ private:
+  LlvmCompilationUnit* AllocateCompilationUnit();
+
+  CompilerDriver* const compiler_driver_;
+
+  const InstructionSet insn_set_;
+
+  Mutex next_cunit_id_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  size_t next_cunit_id_ GUARDED_BY(next_cunit_id_lock_);
+
+  std::string bitcode_filename_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilerLLVM);
+};
+
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_COMPILER_LLVM_H_
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
new file mode 100644
index 0000000..b139e32
--- /dev/null
+++ b/compiler/llvm/gbc_expander.cc
@@ -0,0 +1,3753 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "intrinsic_helper.h"
+#include "ir_builder.h"
+#include "method_reference.h"
+#include "mirror/abstract_method.h"
+#include "mirror/array.h"
+#include "mirror/string.h"
+#include "thread.h"
+#include "utils_llvm.h"
+#include "verifier/method_verifier.h"
+
+#include "dex/compiler_ir.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
+using art::kMIRIgnoreNullCheck;
+using art::kMIRIgnoreRangeCheck;
+
+#include <llvm/ADT/STLExtras.h>
+#include <llvm/IR/Intrinsics.h>
+#include <llvm/IR/Metadata.h>
+#include <llvm/Pass.h>
+#include <llvm/Support/CFG.h>
+#include <llvm/Support/InstIterator.h>
+
+#include <vector>
+#include <map>
+#include <utility>
+
+using namespace art::llvm;
+
+using art::llvm::IntrinsicHelper;
+
+namespace art {
+extern char RemapShorty(char shortyType);
+}  // namespace art
+
+namespace {
+
+class GBCExpanderPass : public llvm::FunctionPass {
+ private:
+  const IntrinsicHelper& intrinsic_helper_;
+  IRBuilder& irb_;
+
+  llvm::LLVMContext& context_;
+  RuntimeSupportBuilder& rtb_;
+
+ private:
+  llvm::AllocaInst* shadow_frame_;
+  llvm::Value* old_shadow_frame_;
+
+ private:
+  art::CompilerDriver* const driver_;
+
+  const art::DexCompilationUnit* const dex_compilation_unit_;
+
+  llvm::Function* func_;
+
+  std::vector<llvm::BasicBlock*> basic_blocks_;
+
+  std::vector<llvm::BasicBlock*> basic_block_landing_pads_;
+  llvm::BasicBlock* current_bb_;
+  std::map<llvm::BasicBlock*, std::vector<std::pair<llvm::BasicBlock*, llvm::BasicBlock*> > >
+      landing_pad_phi_mapping_;
+  llvm::BasicBlock* basic_block_unwind_;
+
+  // Maps each vreg to its shadow frame address.
+  std::vector<llvm::Value*> shadow_frame_vreg_addresses_;
+
+  bool changed_;
+
+ private:
+  //----------------------------------------------------------------------------
+  // Constant for GBC expansion
+  //----------------------------------------------------------------------------
+  enum IntegerShiftKind {
+    kIntegerSHL,
+    kIntegerSHR,
+    kIntegerUSHR,
+  };
+
+ private:
+  //----------------------------------------------------------------------------
+  // Helper function for GBC expansion
+  //----------------------------------------------------------------------------
+
+  llvm::Value* ExpandToRuntime(runtime_support::RuntimeId rt,
+                               llvm::CallInst& inst);
+
+  uint64_t LV2UInt(llvm::Value* lv) {
+    return llvm::cast<llvm::ConstantInt>(lv)->getZExtValue();
+  }
+
+  int64_t LV2SInt(llvm::Value* lv) {
+    return llvm::cast<llvm::ConstantInt>(lv)->getSExtValue();
+  }
+
+ private:
+  // TODO: Almost all Emit* are directly copy-n-paste from MethodCompiler.
+  // Refactor these utility functions from MethodCompiler to avoid forking.
+
+  void EmitStackOverflowCheck(llvm::Instruction* first_non_alloca);
+
+  void RewriteFunction();
+
+  void RewriteBasicBlock(llvm::BasicBlock* original_block);
+
+  void UpdatePhiInstruction(llvm::BasicBlock* old_basic_block,
+                            llvm::BasicBlock* new_basic_block);
+
+
+  // Sign or zero extend category 1 types < 32bits in size to 32bits.
+  llvm::Value* SignOrZeroExtendCat1Types(llvm::Value* value, JType jty);
+
+  // Truncate category 1 types from 32bits to the given JType size.
+  llvm::Value* TruncateCat1Types(llvm::Value* value, JType jty);
+
+  //----------------------------------------------------------------------------
+  // Dex cache code generation helper function
+  //----------------------------------------------------------------------------
+  llvm::Value* EmitLoadDexCacheAddr(art::MemberOffset dex_cache_offset);
+
+  llvm::Value* EmitLoadDexCacheStaticStorageFieldAddr(uint32_t type_idx);
+
+  llvm::Value* EmitLoadDexCacheResolvedTypeFieldAddr(uint32_t type_idx);
+
+  llvm::Value* EmitLoadDexCacheResolvedMethodFieldAddr(uint32_t method_idx);
+
+  llvm::Value* EmitLoadDexCacheStringFieldAddr(uint32_t string_idx);
+
+  //----------------------------------------------------------------------------
+  // Code generation helper function
+  //----------------------------------------------------------------------------
+  llvm::Value* EmitLoadMethodObjectAddr();
+
+  llvm::Value* EmitLoadArrayLength(llvm::Value* array);
+
+  llvm::Value* EmitLoadSDCalleeMethodObjectAddr(uint32_t callee_method_idx);
+
+  llvm::Value* EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx,
+                                                     llvm::Value* this_addr);
+
+  llvm::Value* EmitArrayGEP(llvm::Value* array_addr,
+                            llvm::Value* index_value,
+                            JType elem_jty);
+
+  //----------------------------------------------------------------------------
+  // Invoke helper function
+  //----------------------------------------------------------------------------
+  llvm::Value* EmitInvoke(llvm::CallInst& call_inst);
+
+  //----------------------------------------------------------------------------
+  // Inlining helper functions
+  //----------------------------------------------------------------------------
+  bool EmitIntrinsic(llvm::CallInst& call_inst, llvm::Value** result);
+
+  bool EmitIntrinsicStringLengthOrIsEmpty(llvm::CallInst& call_inst,
+                                          llvm::Value** result, bool is_empty);
+
+ private:
+  //----------------------------------------------------------------------------
+  // Expand Greenland intrinsics
+  //----------------------------------------------------------------------------
+  void Expand_TestSuspend(llvm::CallInst& call_inst);
+
+  void Expand_MarkGCCard(llvm::CallInst& call_inst);
+
+  llvm::Value* Expand_LoadStringFromDexCache(llvm::Value* string_idx_value);
+
+  llvm::Value* Expand_LoadTypeFromDexCache(llvm::Value* type_idx_value);
+
+  void Expand_LockObject(llvm::Value* obj);
+
+  void Expand_UnlockObject(llvm::Value* obj);
+
+  llvm::Value* Expand_ArrayGet(llvm::Value* array_addr,
+                               llvm::Value* index_value,
+                               JType elem_jty);
+
+  void Expand_ArrayPut(llvm::Value* new_value,
+                       llvm::Value* array_addr,
+                       llvm::Value* index_value,
+                       JType elem_jty);
+
+  void Expand_FilledNewArray(llvm::CallInst& call_inst);
+
+  llvm::Value* Expand_IGetFast(llvm::Value* field_offset_value,
+                               llvm::Value* is_volatile_value,
+                               llvm::Value* object_addr,
+                               JType field_jty);
+
+  void Expand_IPutFast(llvm::Value* field_offset_value,
+                       llvm::Value* is_volatile_value,
+                       llvm::Value* object_addr,
+                       llvm::Value* new_value,
+                       JType field_jty);
+
+  llvm::Value* Expand_SGetFast(llvm::Value* static_storage_addr,
+                               llvm::Value* field_offset_value,
+                               llvm::Value* is_volatile_value,
+                               JType field_jty);
+
+  void Expand_SPutFast(llvm::Value* static_storage_addr,
+                       llvm::Value* field_offset_value,
+                       llvm::Value* is_volatile_value,
+                       llvm::Value* new_value,
+                       JType field_jty);
+
+  llvm::Value* Expand_LoadDeclaringClassSSB(llvm::Value* method_object_addr);
+
+  llvm::Value* Expand_LoadClassSSBFromDexCache(llvm::Value* type_idx_value);
+
+  llvm::Value*
+  Expand_GetSDCalleeMethodObjAddrFast(llvm::Value* callee_method_idx_value);
+
+  llvm::Value*
+  Expand_GetVirtualCalleeMethodObjAddrFast(llvm::Value* vtable_idx_value,
+                                           llvm::Value* this_addr);
+
+  llvm::Value* Expand_Invoke(llvm::CallInst& call_inst);
+
+  llvm::Value* Expand_DivRem(llvm::CallInst& call_inst, bool is_div, JType op_jty);
+
+  void Expand_AllocaShadowFrame(llvm::Value* num_vregs_value);
+
+  void Expand_SetVReg(llvm::Value* entry_idx, llvm::Value* obj);
+
+  void Expand_PopShadowFrame();
+
+  void Expand_UpdateDexPC(llvm::Value* dex_pc_value);
+
+  //----------------------------------------------------------------------------
+  // Quick
+  //----------------------------------------------------------------------------
+
+  llvm::Value* Expand_FPCompare(llvm::Value* src1_value,
+                                llvm::Value* src2_value,
+                                bool gt_bias);
+
+  llvm::Value* Expand_LongCompare(llvm::Value* src1_value, llvm::Value* src2_value);
+
+  llvm::Value* EmitCompareResultSelection(llvm::Value* cmp_eq,
+                                          llvm::Value* cmp_lt);
+
+  llvm::Value* EmitLoadConstantClass(uint32_t dex_pc, uint32_t type_idx);
+  llvm::Value* EmitLoadStaticStorage(uint32_t dex_pc, uint32_t type_idx);
+
+  llvm::Value* Expand_HLIGet(llvm::CallInst& call_inst, JType field_jty);
+  void Expand_HLIPut(llvm::CallInst& call_inst, JType field_jty);
+
+  llvm::Value* Expand_HLSget(llvm::CallInst& call_inst, JType field_jty);
+  void Expand_HLSput(llvm::CallInst& call_inst, JType field_jty);
+
+  llvm::Value* Expand_HLArrayGet(llvm::CallInst& call_inst, JType field_jty);
+  void Expand_HLArrayPut(llvm::CallInst& call_inst, JType field_jty);
+
+  llvm::Value* Expand_ConstString(llvm::CallInst& call_inst);
+  llvm::Value* Expand_ConstClass(llvm::CallInst& call_inst);
+
+  void Expand_MonitorEnter(llvm::CallInst& call_inst);
+  void Expand_MonitorExit(llvm::CallInst& call_inst);
+
+  void Expand_HLCheckCast(llvm::CallInst& call_inst);
+  llvm::Value* Expand_InstanceOf(llvm::CallInst& call_inst);
+
+  llvm::Value* Expand_NewInstance(llvm::CallInst& call_inst);
+
+  llvm::Value* Expand_HLInvoke(llvm::CallInst& call_inst);
+
+  llvm::Value* Expand_OptArrayLength(llvm::CallInst& call_inst);
+  llvm::Value* Expand_NewArray(llvm::CallInst& call_inst);
+  llvm::Value* Expand_HLFilledNewArray(llvm::CallInst& call_inst);
+  void Expand_HLFillArrayData(llvm::CallInst& call_inst);
+
+  llvm::Value* EmitAllocNewArray(uint32_t dex_pc,
+                                 llvm::Value* array_length_value,
+                                 uint32_t type_idx,
+                                 bool is_filled_new_array);
+
+  llvm::Value* EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx,
+                                                        art::InvokeType invoke_type,
+                                                        llvm::Value* this_addr,
+                                                        uint32_t dex_pc,
+                                                        bool is_fast_path);
+
+  void EmitMarkGCCard(llvm::Value* value, llvm::Value* target_addr);
+
+  void EmitUpdateDexPC(uint32_t dex_pc);
+
+  void EmitGuard_DivZeroException(uint32_t dex_pc,
+                                  llvm::Value* denominator,
+                                  JType op_jty);
+
+  void EmitGuard_NullPointerException(uint32_t dex_pc, llvm::Value* object,
+                                      int opt_flags);
+
+  void EmitGuard_ArrayIndexOutOfBoundsException(uint32_t dex_pc,
+                                                llvm::Value* array,
+                                                llvm::Value* index,
+                                                int opt_flags);
+
+  llvm::FunctionType* GetFunctionType(llvm::Type* ret_type, uint32_t method_idx, bool is_static);
+
+  llvm::BasicBlock* GetBasicBlock(uint32_t dex_pc);
+
+  llvm::BasicBlock* CreateBasicBlockWithDexPC(uint32_t dex_pc,
+                                              const char* postfix);
+
+  int32_t GetTryItemOffset(uint32_t dex_pc);
+
+  llvm::BasicBlock* GetLandingPadBasicBlock(uint32_t dex_pc);
+
+  llvm::BasicBlock* GetUnwindBasicBlock();
+
+  void EmitGuard_ExceptionLandingPad(uint32_t dex_pc);
+
+  void EmitBranchExceptionLandingPad(uint32_t dex_pc);
+
+  //----------------------------------------------------------------------------
+  // Expand Arithmetic Helper Intrinsics
+  //----------------------------------------------------------------------------
+
+  llvm::Value* Expand_IntegerShift(llvm::Value* src1_value,
+                                   llvm::Value* src2_value,
+                                   IntegerShiftKind kind,
+                                   JType op_jty);
+
+ public:
+  static char ID;
+
+  GBCExpanderPass(const IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
+                  art::CompilerDriver* driver, const art::DexCompilationUnit* dex_compilation_unit)
+      : llvm::FunctionPass(ID), intrinsic_helper_(intrinsic_helper), irb_(irb),
+        context_(irb.getContext()), rtb_(irb.Runtime()),
+        shadow_frame_(NULL), old_shadow_frame_(NULL),
+        driver_(driver),
+        dex_compilation_unit_(dex_compilation_unit),
+        func_(NULL), current_bb_(NULL), basic_block_unwind_(NULL), changed_(false) {}
+
+  bool runOnFunction(llvm::Function& func);
+
+ private:
+  void InsertStackOverflowCheck(llvm::Function& func);
+
+  llvm::Value* ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
+                               llvm::CallInst& call_inst);
+
+};
+
+char GBCExpanderPass::ID = 0;
+
+bool GBCExpanderPass::runOnFunction(llvm::Function& func) {
+  VLOG(compiler) << "GBC expansion on " << func.getName().str();
+
+  // Runtime support or stub
+  if (dex_compilation_unit_ == NULL) {
+    return false;
+  }
+
+  // Setup rewrite context
+  shadow_frame_ = NULL;
+  old_shadow_frame_ = NULL;
+  func_ = &func;
+  changed_ = false; // Assume unchanged
+
+  shadow_frame_vreg_addresses_.resize(dex_compilation_unit_->GetCodeItem()->registers_size_, NULL);
+  basic_blocks_.resize(dex_compilation_unit_->GetCodeItem()->insns_size_in_code_units_);
+  basic_block_landing_pads_.resize(dex_compilation_unit_->GetCodeItem()->tries_size_, NULL);
+  basic_block_unwind_ = NULL;
+  for (llvm::Function::iterator bb_iter = func_->begin(), bb_end = func_->end();
+       bb_iter != bb_end;
+       ++bb_iter) {
+    if (bb_iter->begin()->getMetadata("DexOff") == NULL) {
+      continue;
+    }
+    uint32_t dex_pc = LV2UInt(bb_iter->begin()->getMetadata("DexOff")->getOperand(0));
+    basic_blocks_[dex_pc] = bb_iter;
+  }
+
+  // Insert stack overflow check
+  InsertStackOverflowCheck(func); // TODO: Use intrinsic.
+
+  // Rewrite the intrinsics
+  RewriteFunction();
+
+  VERIFY_LLVM_FUNCTION(func);
+
+  return changed_;
+}
+
+void GBCExpanderPass::RewriteBasicBlock(llvm::BasicBlock* original_block) {
+  llvm::BasicBlock* curr_basic_block = original_block;
+
+  llvm::BasicBlock::iterator inst_iter = original_block->begin();
+  llvm::BasicBlock::iterator inst_end = original_block->end();
+
+  while (inst_iter != inst_end) {
+    llvm::CallInst* call_inst = llvm::dyn_cast<llvm::CallInst>(inst_iter);
+    IntrinsicHelper::IntrinsicId intr_id = IntrinsicHelper::UnknownId;
+
+    if (call_inst) {
+      llvm::Function* callee_func = call_inst->getCalledFunction();
+      intr_id = intrinsic_helper_.GetIntrinsicId(callee_func);
+    }
+
+    if (intr_id == IntrinsicHelper::UnknownId) {
+      // This is not an intrinsic call.  Skip this instruction.
+      ++inst_iter;
+      continue;
+    }
+
+    // Rewrite the intrinsic and change the function
+    changed_ = true;
+    irb_.SetInsertPoint(inst_iter);
+
+    // Expand the intrinsic
+    if (llvm::Value* new_value = ExpandIntrinsic(intr_id, *call_inst)) {
+      inst_iter->replaceAllUsesWith(new_value);
+    }
+
+    // Remove the old intrinsic call instruction
+    llvm::BasicBlock::iterator old_inst = inst_iter++;
+    old_inst->eraseFromParent();
+
+    // Splice the instruction to the new basic block
+    llvm::BasicBlock* next_basic_block = irb_.GetInsertBlock();
+    if (next_basic_block != curr_basic_block) {
+      next_basic_block->getInstList().splice(
+          irb_.GetInsertPoint(), curr_basic_block->getInstList(),
+          inst_iter, inst_end);
+      curr_basic_block = next_basic_block;
+      inst_end = curr_basic_block->end();
+    }
+  }
+}
+
+
+void GBCExpanderPass::RewriteFunction() {
+  size_t num_basic_blocks = func_->getBasicBlockList().size();
+  // NOTE: We are not using (bb_iter != bb_end) as the for-loop condition,
+  // because we will create new basic blocks while expanding the intrinsics.
+  // We only want to iterate through the input basic blocks.
+
+  landing_pad_phi_mapping_.clear();
+
+  for (llvm::Function::iterator bb_iter = func_->begin();
+       num_basic_blocks > 0; ++bb_iter, --num_basic_blocks) {
+    // Set insert point to current basic block.
+    irb_.SetInsertPoint(bb_iter);
+
+    current_bb_ = bb_iter;
+
+    // Rewrite the basic block
+    RewriteBasicBlock(bb_iter);
+
+    // Update the phi-instructions in the successor basic block
+    llvm::BasicBlock* last_block = irb_.GetInsertBlock();
+    if (last_block != bb_iter) {
+      UpdatePhiInstruction(bb_iter, last_block);
+    }
+  }
+
+  typedef std::map<llvm::PHINode*, llvm::PHINode*> HandlerPHIMap;
+  HandlerPHIMap handler_phi;
+  // Iterate every used landing pad basic block
+  for (size_t i = 0, ei = basic_block_landing_pads_.size(); i != ei; ++i) {
+    llvm::BasicBlock* lbb = basic_block_landing_pads_[i];
+    if (lbb == NULL) {
+      continue;
+    }
+
+    llvm::TerminatorInst* term_inst = lbb->getTerminator();
+    std::vector<std::pair<llvm::BasicBlock*, llvm::BasicBlock*> >& rewrite_pair
+        = landing_pad_phi_mapping_[lbb];
+    irb_.SetInsertPoint(lbb->begin());
+
+    // Iterate every succeeding basic block (catch block)
+    for (unsigned succ_iter = 0, succ_end = term_inst->getNumSuccessors();
+         succ_iter != succ_end; ++succ_iter) {
+      llvm::BasicBlock* succ_basic_block = term_inst->getSuccessor(succ_iter);
+
+      // Iterate every phi instructions in the succeeding basic block
+      for (llvm::BasicBlock::iterator
+           inst_iter = succ_basic_block->begin(),
+           inst_end = succ_basic_block->end();
+           inst_iter != inst_end; ++inst_iter) {
+        llvm::PHINode *phi = llvm::dyn_cast<llvm::PHINode>(inst_iter);
+
+        if (!phi) {
+          break; // Met a non-phi instruction.  Done.
+        }
+
+        if (handler_phi[phi] == NULL) {
+          handler_phi[phi] = llvm::PHINode::Create(phi->getType(), 1);
+        }
+
+        // Create new_phi in landing pad
+        llvm::PHINode* new_phi = irb_.CreatePHI(phi->getType(), rewrite_pair.size());
+        // Insert all incoming value into new_phi by rewrite_pair
+        for (size_t j = 0, ej = rewrite_pair.size(); j != ej; ++j) {
+          llvm::BasicBlock* old_bb = rewrite_pair[j].first;
+          llvm::BasicBlock* new_bb = rewrite_pair[j].second;
+          new_phi->addIncoming(phi->getIncomingValueForBlock(old_bb), new_bb);
+        }
+        // Delete all incoming value from phi by rewrite_pair
+        for (size_t j = 0, ej = rewrite_pair.size(); j != ej; ++j) {
+          llvm::BasicBlock* old_bb = rewrite_pair[j].first;
+          int old_bb_idx = phi->getBasicBlockIndex(old_bb);
+          if (old_bb_idx >= 0) {
+            phi->removeIncomingValue(old_bb_idx, false);
+          }
+        }
+        // Insert new_phi into new handler phi
+        handler_phi[phi]->addIncoming(new_phi, lbb);
+      }
+    }
+  }
+
+  // Replace all handler phi
+  // We can't just use the old handler phi, because some exception edges will disappear after we
+  // compute fast-path.
+  for (HandlerPHIMap::iterator it = handler_phi.begin(); it != handler_phi.end(); ++it) {
+    llvm::PHINode* old_phi = it->first;
+    llvm::PHINode* new_phi = it->second;
+    new_phi->insertBefore(old_phi);
+    old_phi->replaceAllUsesWith(new_phi);
+    old_phi->eraseFromParent();
+  }
+}
+
+void GBCExpanderPass::UpdatePhiInstruction(llvm::BasicBlock* old_basic_block,
+                                           llvm::BasicBlock* new_basic_block) {
+  llvm::TerminatorInst* term_inst = new_basic_block->getTerminator();
+
+  if (!term_inst) {
+    return; // No terminating instruction in new_basic_block.  Nothing to do.
+  }
+
+  // Iterate every succeeding basic block
+  for (unsigned succ_iter = 0, succ_end = term_inst->getNumSuccessors();
+       succ_iter != succ_end; ++succ_iter) {
+    llvm::BasicBlock* succ_basic_block = term_inst->getSuccessor(succ_iter);
+
+    // Iterate every phi instructions in the succeeding basic block
+    for (llvm::BasicBlock::iterator
+         inst_iter = succ_basic_block->begin(),
+         inst_end = succ_basic_block->end();
+         inst_iter != inst_end; ++inst_iter) {
+      llvm::PHINode *phi = llvm::dyn_cast<llvm::PHINode>(inst_iter);
+
+      if (!phi) {
+        break; // Met a non-phi instruction.  Done.
+      }
+
+      // Update the incoming block of this phi instruction
+      for (llvm::PHINode::block_iterator
+           ibb_iter = phi->block_begin(), ibb_end = phi->block_end();
+           ibb_iter != ibb_end; ++ibb_iter) {
+        if (*ibb_iter == old_basic_block) {
+          *ibb_iter = new_basic_block;
+        }
+      }
+    }
+  }
+}
+
+llvm::Value* GBCExpanderPass::ExpandToRuntime(runtime_support::RuntimeId rt,
+                                              llvm::CallInst& inst) {
+  // Some GBC intrinsics can be replaced directly with calls to the IBC runtime.
+  // "Directly" means the arguments passed to the GBC intrinsic are the same as
+  // those of the IBC runtime function, so only the callee needs to change.
+  unsigned num_args = inst.getNumArgOperands();
+
+  if (num_args <= 0) {
+    return irb_.CreateCall(irb_.GetRuntime(rt));
+  } else {
+    std::vector<llvm::Value*> args;
+    for (unsigned i = 0; i < num_args; i++) {
+      args.push_back(inst.getArgOperand(i));
+    }
+
+    return irb_.CreateCall(irb_.GetRuntime(rt), args);
+  }
+}
+
+void
+GBCExpanderPass::EmitStackOverflowCheck(llvm::Instruction* first_non_alloca) {
+  llvm::Function* func = first_non_alloca->getParent()->getParent();
+  llvm::Module* module = func->getParent();
+
+  // Call llvm intrinsic function to get frame address.
+  llvm::Function* frameaddress =
+      llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::frameaddress);
+
+  // The type of llvm::frameaddress is: i8* @llvm.frameaddress(i32)
+  llvm::Value* frame_address = irb_.CreateCall(frameaddress, irb_.getInt32(0));
+
+  // Cast i8* to int
+  frame_address = irb_.CreatePtrToInt(frame_address, irb_.getPtrEquivIntTy());
+
+  // Get thread.stack_end_
+  llvm::Value* stack_end =
+    irb_.Runtime().EmitLoadFromThreadOffset(art::Thread::StackEndOffset().Int32Value(),
+                                            irb_.getPtrEquivIntTy(),
+                                            kTBAARuntimeInfo);
+
+  // Check the frame address < thread.stack_end_ ?
+  llvm::Value* is_stack_overflow = irb_.CreateICmpULT(frame_address, stack_end);
+
+  llvm::BasicBlock* block_exception =
+      llvm::BasicBlock::Create(context_, "stack_overflow", func);
+
+  llvm::BasicBlock* block_continue =
+      llvm::BasicBlock::Create(context_, "stack_overflow_cont", func);
+
+  irb_.CreateCondBr(is_stack_overflow, block_exception, block_continue, kUnlikely);
+
+  // If stack overflow, throw exception.
+  irb_.SetInsertPoint(block_exception);
+  irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowStackOverflowException));
+
+  // Unwind.
+  llvm::Type* ret_type = func->getReturnType();
+  if (ret_type->isVoidTy()) {
+    irb_.CreateRetVoid();
+  } else {
+    // The return value is ignored when there's an exception. MethodCompiler
+    // returns a zero value of the corresponding return type in this case;
+    // GBCExpander returns an LLVM undef value here for brevity.
+    irb_.CreateRet(llvm::UndefValue::get(ret_type));
+  }
+
+  irb_.SetInsertPoint(block_continue);
+}
+
+llvm::Value* GBCExpanderPass::EmitLoadDexCacheAddr(art::MemberOffset offset) {
+  llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+  return irb_.LoadFromObjectOffset(method_object_addr,
+                                   offset.Int32Value(),
+                                   irb_.getJObjectTy(),
+                                   kTBAAConstJObject);
+}
+
+llvm::Value*
+GBCExpanderPass::EmitLoadDexCacheStaticStorageFieldAddr(uint32_t type_idx) {
+  llvm::Value* static_storage_dex_cache_addr =
+    EmitLoadDexCacheAddr(art::mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset());
+
+  llvm::Value* type_idx_value = irb_.getPtrEquivInt(type_idx);
+
+  return EmitArrayGEP(static_storage_dex_cache_addr, type_idx_value, kObject);
+}
+
+llvm::Value*
+GBCExpanderPass::EmitLoadDexCacheResolvedTypeFieldAddr(uint32_t type_idx) {
+  llvm::Value* resolved_type_dex_cache_addr =
+    EmitLoadDexCacheAddr(art::mirror::AbstractMethod::DexCacheResolvedTypesOffset());
+
+  llvm::Value* type_idx_value = irb_.getPtrEquivInt(type_idx);
+
+  return EmitArrayGEP(resolved_type_dex_cache_addr, type_idx_value, kObject);
+}
+
+llvm::Value* GBCExpanderPass::
+EmitLoadDexCacheResolvedMethodFieldAddr(uint32_t method_idx) {
+  llvm::Value* resolved_method_dex_cache_addr =
+    EmitLoadDexCacheAddr(art::mirror::AbstractMethod::DexCacheResolvedMethodsOffset());
+
+  llvm::Value* method_idx_value = irb_.getPtrEquivInt(method_idx);
+
+  return EmitArrayGEP(resolved_method_dex_cache_addr, method_idx_value, kObject);
+}
+
+llvm::Value* GBCExpanderPass::
+EmitLoadDexCacheStringFieldAddr(uint32_t string_idx) {
+  llvm::Value* string_dex_cache_addr =
+    EmitLoadDexCacheAddr(art::mirror::AbstractMethod::DexCacheStringsOffset());
+
+  llvm::Value* string_idx_value = irb_.getPtrEquivInt(string_idx);
+
+  return EmitArrayGEP(string_dex_cache_addr, string_idx_value, kObject);
+}
+
+llvm::Value* GBCExpanderPass::EmitLoadMethodObjectAddr() {
+  llvm::Function* parent_func = irb_.GetInsertBlock()->getParent();
+  return parent_func->arg_begin();
+}
+
+llvm::Value* GBCExpanderPass::EmitLoadArrayLength(llvm::Value* array) {
+  // Load array length
+  return irb_.LoadFromObjectOffset(array,
+                                   art::mirror::Array::LengthOffset().Int32Value(),
+                                   irb_.getJIntTy(),
+                                   kTBAAConstJObject);
+}
+
+llvm::Value*
+GBCExpanderPass::EmitLoadSDCalleeMethodObjectAddr(uint32_t callee_method_idx) {
+  llvm::Value* callee_method_object_field_addr =
+    EmitLoadDexCacheResolvedMethodFieldAddr(callee_method_idx);
+
+  return irb_.CreateLoad(callee_method_object_field_addr, kTBAARuntimeInfo);
+}
+
+llvm::Value* GBCExpanderPass::
+EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx, llvm::Value* this_addr) {
+  // Load class object of *this* pointer
+  llvm::Value* class_object_addr =
+    irb_.LoadFromObjectOffset(this_addr,
+                              art::mirror::Object::ClassOffset().Int32Value(),
+                              irb_.getJObjectTy(),
+                              kTBAAConstJObject);
+
+  // Load vtable address
+  llvm::Value* vtable_addr =
+    irb_.LoadFromObjectOffset(class_object_addr,
+                              art::mirror::Class::VTableOffset().Int32Value(),
+                              irb_.getJObjectTy(),
+                              kTBAAConstJObject);
+
+  // Load callee method object
+  llvm::Value* vtable_idx_value =
+    irb_.getPtrEquivInt(static_cast<uint64_t>(vtable_idx));
+
+  llvm::Value* method_field_addr =
+    EmitArrayGEP(vtable_addr, vtable_idx_value, kObject);
+
+  return irb_.CreateLoad(method_field_addr, kTBAAConstJObject);
+}
+
+// Emit Array GetElementPtr
+llvm::Value* GBCExpanderPass::EmitArrayGEP(llvm::Value* array_addr,
+                                           llvm::Value* index_value,
+                                           JType elem_jty) {
+
+  int data_offset;
+  if (elem_jty == kLong || elem_jty == kDouble ||
+      (elem_jty == kObject && sizeof(uint64_t) == sizeof(art::mirror::Object*))) {
+    data_offset = art::mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  llvm::Constant* data_offset_value =
+    irb_.getPtrEquivInt(data_offset);
+
+  llvm::Type* elem_type = irb_.getJType(elem_jty);
+
+  llvm::Value* array_data_addr =
+    irb_.CreatePtrDisp(array_addr, data_offset_value,
+                       elem_type->getPointerTo());
+
+  return irb_.CreateGEP(array_data_addr, index_value);
+}
+
+llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  art::InvokeType invoke_type =
+      static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
+  bool is_static = (invoke_type == art::kStatic);
+  art::MethodReference target_method(dex_compilation_unit_->GetDexFile(),
+                                     LV2UInt(call_inst.getArgOperand(1)));
+
+  // Load *this* actual parameter
+  llvm::Value* this_addr = (!is_static) ? call_inst.getArgOperand(3) : NULL;
+
+  // Compute invoke related information for compiler decision
+  int vtable_idx = -1;
+  uintptr_t direct_code = 0;
+  uintptr_t direct_method = 0;
+  bool is_fast_path = driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc,
+                                                 invoke_type, target_method,
+                                                 vtable_idx,
+                                                 direct_code, direct_method,
+                                                 true);
+  // Load the method object
+  llvm::Value* callee_method_object_addr = NULL;
+
+  if (!is_fast_path) {
+    callee_method_object_addr =
+        EmitCallRuntimeForCalleeMethodObjectAddr(target_method.dex_method_index, invoke_type,
+                                                 this_addr, dex_pc, is_fast_path);
+  } else {
+    switch (invoke_type) {
+      case art::kStatic:
+      case art::kDirect:
+        if (direct_method != 0u &&
+            direct_method != static_cast<uintptr_t>(-1)) {
+          callee_method_object_addr =
+              irb_.CreateIntToPtr(irb_.getPtrEquivInt(direct_method),
+                                  irb_.getJObjectTy());
+        } else {
+          callee_method_object_addr =
+              EmitLoadSDCalleeMethodObjectAddr(target_method.dex_method_index);
+        }
+        break;
+
+      case art::kVirtual:
+        DCHECK(vtable_idx != -1);
+        callee_method_object_addr =
+            EmitLoadVirtualCalleeMethodObjectAddr(vtable_idx, this_addr);
+        break;
+
+      case art::kSuper:
+        LOG(FATAL) << "invoke-super should be promoted to invoke-direct in "
+                      "the fast path.";
+        break;
+
+      case art::kInterface:
+        callee_method_object_addr =
+            EmitCallRuntimeForCalleeMethodObjectAddr(target_method.dex_method_index,
+                                                     invoke_type, this_addr,
+                                                     dex_pc, is_fast_path);
+        break;
+    }
+  }
+
+  // Load the actual parameter
+  std::vector<llvm::Value*> args;
+
+  args.push_back(callee_method_object_addr); // method object for callee
+
+  for (uint32_t i = 3; i < call_inst.getNumArgOperands(); ++i) {
+    args.push_back(call_inst.getArgOperand(i));
+  }
+
+  llvm::Value* code_addr;
+  llvm::Type* func_type = GetFunctionType(call_inst.getType(),
+                                          target_method.dex_method_index, is_static);
+  if (direct_code != 0u && direct_code != static_cast<uintptr_t>(-1)) {
+    code_addr =
+        irb_.CreateIntToPtr(irb_.getPtrEquivInt(direct_code),
+                            func_type->getPointerTo());
+  } else {
+    code_addr =
+        irb_.LoadFromObjectOffset(callee_method_object_addr,
+                                  art::mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
+                                  func_type->getPointerTo(), kTBAARuntimeInfo);
+  }
+
+  // Invoke callee
+  EmitUpdateDexPC(dex_pc);
+  llvm::Value* retval = irb_.CreateCall(code_addr, args);
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  return retval;
+}
+
+bool GBCExpanderPass::EmitIntrinsic(llvm::CallInst& call_inst,
+                                    llvm::Value** result) {
+  DCHECK(result != NULL);
+
+  uint32_t callee_method_idx = LV2UInt(call_inst.getArgOperand(1));
+  std::string callee_method_name(
+      PrettyMethod(callee_method_idx, *dex_compilation_unit_->GetDexFile()));
+
+  if (callee_method_name == "int java.lang.String.length()") {
+    return EmitIntrinsicStringLengthOrIsEmpty(call_inst, result,
+                                              false /* is_empty */);
+  }
+  if (callee_method_name == "boolean java.lang.String.isEmpty()") {
+    return EmitIntrinsicStringLengthOrIsEmpty(call_inst, result,
+                                              true /* is_empty */);
+  }
+
+  *result = NULL;
+  return false;
+}
+
+bool GBCExpanderPass::EmitIntrinsicStringLengthOrIsEmpty(llvm::CallInst& call_inst,
+                                                         llvm::Value** result,
+                                                         bool is_empty) {
+  art::InvokeType invoke_type =
+        static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
+  DCHECK_NE(invoke_type, art::kStatic);
+  DCHECK_EQ(call_inst.getNumArgOperands(), 4U);
+
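+  // Both intrinsics read the String's count field directly; isEmpty()
+  // additionally compares the count against zero.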
+  llvm::Value* this_object = call_inst.getArgOperand(3);
+  llvm::Value* string_count =
+      irb_.LoadFromObjectOffset(this_object,
+                                art::mirror::String::CountOffset().Int32Value(),
+                                irb_.getJIntTy(),
+                                kTBAAConstJObject);
+  if (is_empty) {
+    llvm::Value* count_equals_zero = irb_.CreateICmpEQ(string_count,
+                                                       irb_.getJInt(0));
+    llvm::Value* is_empty = irb_.CreateSelect(count_equals_zero,
+                                              irb_.getJBoolean(true),
+                                              irb_.getJBoolean(false));
+    is_empty = SignOrZeroExtendCat1Types(is_empty, kBoolean);
+    *result = is_empty;
+  } else {
+    *result = string_count;
+  }
+  return true;
+}
+
+void GBCExpanderPass::Expand_TestSuspend(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+
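+  // Load the 16-bit thread flags; any non-zero value means a suspend check
+  // is required, so branch to the unlikely slow path.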
+  llvm::Value* suspend_count =
+      irb_.Runtime().EmitLoadFromThreadOffset(art::Thread::ThreadFlagsOffset().Int32Value(),
+                                              irb_.getInt16Ty(),
+                                              kTBAARuntimeInfo);
+  llvm::Value* is_suspend = irb_.CreateICmpNE(suspend_count, irb_.getInt16(0));
+
+  llvm::BasicBlock* basic_block_suspend = CreateBasicBlockWithDexPC(dex_pc, "suspend");
+  llvm::BasicBlock* basic_block_cont = CreateBasicBlockWithDexPC(dex_pc, "suspend_cont");
+
+  irb_.CreateCondBr(is_suspend, basic_block_suspend, basic_block_cont, kUnlikely);
+
+  irb_.SetInsertPoint(basic_block_suspend);
+  if (dex_pc != art::DexFile::kDexNoIndex) {
+    EmitUpdateDexPC(dex_pc);
+  }
+  irb_.Runtime().EmitTestSuspend();
+
+  llvm::BasicBlock* basic_block_exception = CreateBasicBlockWithDexPC(dex_pc, "exception");
+  llvm::Value* exception_pending = irb_.Runtime().EmitIsExceptionPending();
+  irb_.CreateCondBr(exception_pending, basic_block_exception, basic_block_cont, kUnlikely);
+
+  irb_.SetInsertPoint(basic_block_exception);
+  llvm::Type* ret_type = call_inst.getParent()->getParent()->getReturnType();
+  if (ret_type->isVoidTy()) {
+    irb_.CreateRetVoid();
+  } else {
+    // The return value is ignored when there's an exception.
+    irb_.CreateRet(llvm::UndefValue::get(ret_type));
+  }
+
+  irb_.SetInsertPoint(basic_block_cont);
+  return;
+}
+
+void GBCExpanderPass::Expand_MarkGCCard(llvm::CallInst& call_inst) {
+  irb_.Runtime().EmitMarkGCCard(call_inst.getArgOperand(0), call_inst.getArgOperand(1));
+  return;
+}
+
+llvm::Value*
+GBCExpanderPass::Expand_LoadStringFromDexCache(llvm::Value* string_idx_value) {
+  uint32_t string_idx =
+    llvm::cast<llvm::ConstantInt>(string_idx_value)->getZExtValue();
+
+  llvm::Value* string_field_addr = EmitLoadDexCacheStringFieldAddr(string_idx);
+
+  return irb_.CreateLoad(string_field_addr, kTBAARuntimeInfo);
+}
+
+llvm::Value*
+GBCExpanderPass::Expand_LoadTypeFromDexCache(llvm::Value* type_idx_value) {
+  uint32_t type_idx =
+    llvm::cast<llvm::ConstantInt>(type_idx_value)->getZExtValue();
+
+  llvm::Value* type_field_addr =
+    EmitLoadDexCacheResolvedTypeFieldAddr(type_idx);
+
+  return irb_.CreateLoad(type_field_addr, kTBAARuntimeInfo);
+}
+
+void GBCExpanderPass::Expand_LockObject(llvm::Value* obj) {
+  rtb_.EmitLockObject(obj);
+  return;
+}
+
+void GBCExpanderPass::Expand_UnlockObject(llvm::Value* obj) {
+  rtb_.EmitUnlockObject(obj);
+  return;
+}
+
+llvm::Value* GBCExpanderPass::Expand_ArrayGet(llvm::Value* array_addr,
+                                              llvm::Value* index_value,
+                                              JType elem_jty) {
+  llvm::Value* array_elem_addr =
+    EmitArrayGEP(array_addr, index_value, elem_jty);
+
+  return irb_.CreateLoad(array_elem_addr, kTBAAHeapArray, elem_jty);
+}
+
+void GBCExpanderPass::Expand_ArrayPut(llvm::Value* new_value,
+                                      llvm::Value* array_addr,
+                                      llvm::Value* index_value,
+                                      JType elem_jty) {
+  llvm::Value* array_elem_addr =
+    EmitArrayGEP(array_addr, index_value, elem_jty);
+
+  irb_.CreateStore(new_value, array_elem_addr, kTBAAHeapArray, elem_jty);
+
+  return;
+}
+
+void GBCExpanderPass::Expand_FilledNewArray(llvm::CallInst& call_inst) {
+  // Most of this code follows MethodCompiler::EmitInsn_FilledNewArray.
+  llvm::Value* array = call_inst.getArgOperand(0);
+
+  uint32_t element_jty =
+    llvm::cast<llvm::ConstantInt>(call_inst.getArgOperand(1))->getZExtValue();
+
+  DCHECK(call_inst.getNumArgOperands() > 2);
+  unsigned num_elements = (call_inst.getNumArgOperands() - 2);
+
+  bool is_elem_int_ty = (static_cast<JType>(element_jty) == kInt);
+
+  uint32_t alignment;
+  llvm::Constant* elem_size;
+  llvm::PointerType* field_type;
+
+  // NOTE: Currently filled-new-array only supports 'L', '[', and 'I' as the
+  // element type, so we only need to check two cases: primitive int and
+  // non-primitive types.
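+  // Reference elements are stored as pointer-sized jobject slots, while
+  // primitive ints are stored as 32-bit slots.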
+  if (is_elem_int_ty) {
+    alignment = sizeof(int32_t);
+    elem_size = irb_.getPtrEquivInt(sizeof(int32_t));
+    field_type = irb_.getJIntTy()->getPointerTo();
+  } else {
+    alignment = irb_.getSizeOfPtrEquivInt();
+    elem_size = irb_.getSizeOfPtrEquivIntValue();
+    field_type = irb_.getJObjectTy()->getPointerTo();
+  }
+
+  llvm::Value* data_field_offset =
+    irb_.getPtrEquivInt(art::mirror::Array::DataOffset(alignment).Int32Value());
+
+  llvm::Value* data_field_addr =
+    irb_.CreatePtrDisp(array, data_field_offset, field_type);
+
+  for (unsigned i = 0; i < num_elements; ++i) {
+    // Values to fill the array begin at the 3rd argument
+    llvm::Value* reg_value = call_inst.getArgOperand(2 + i);
+
+    irb_.CreateStore(reg_value, data_field_addr, kTBAAHeapArray);
+
+    data_field_addr =
+      irb_.CreatePtrDisp(data_field_addr, elem_size, field_type);
+  }
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::Expand_IGetFast(llvm::Value* field_offset_value,
+                                              llvm::Value* /*is_volatile_value*/,
+                                              llvm::Value* object_addr,
+                                              JType field_jty) {
+  int field_offset =
+    llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
+
+  DCHECK_GE(field_offset, 0);
+
+  llvm::PointerType* field_type =
+    irb_.getJType(field_jty)->getPointerTo();
+
+  field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+  llvm::Value* field_addr =
+    irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
+
+  // TODO: Check is_volatile.  We need to generate an atomic load instruction
+  // when is_volatile is true.
+  return irb_.CreateLoad(field_addr, kTBAAHeapInstance, field_jty);
+}
+
+void GBCExpanderPass::Expand_IPutFast(llvm::Value* field_offset_value,
+                                      llvm::Value* /* is_volatile_value */,
+                                      llvm::Value* object_addr,
+                                      llvm::Value* new_value,
+                                      JType field_jty) {
+  int field_offset =
+    llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
+
+  DCHECK_GE(field_offset, 0);
+
+  llvm::PointerType* field_type =
+    irb_.getJType(field_jty)->getPointerTo();
+
+  field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+  llvm::Value* field_addr =
+    irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
+
+  // TODO: Check is_volatile.  We need to generate an atomic store instruction
+  // when is_volatile is true.
+  irb_.CreateStore(new_value, field_addr, kTBAAHeapInstance, field_jty);
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::Expand_SGetFast(llvm::Value* static_storage_addr,
+                                              llvm::Value* field_offset_value,
+                                              llvm::Value* /*is_volatile_value*/,
+                                              JType field_jty) {
+  int field_offset =
+    llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
+
+  DCHECK_GE(field_offset, 0);
+
+  llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+  llvm::Value* static_field_addr =
+    irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
+                       irb_.getJType(field_jty)->getPointerTo());
+
+  // TODO: Check is_volatile.  We need to generate an atomic load instruction
+  // when is_volatile is true.
+  return irb_.CreateLoad(static_field_addr, kTBAAHeapStatic, field_jty);
+}
+
+void GBCExpanderPass::Expand_SPutFast(llvm::Value* static_storage_addr,
+                                      llvm::Value* field_offset_value,
+                                      llvm::Value* /* is_volatile_value */,
+                                      llvm::Value* new_value,
+                                      JType field_jty) {
+  int field_offset =
+    llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
+
+  DCHECK_GE(field_offset, 0);
+
+  llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+  llvm::Value* static_field_addr =
+    irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
+                       irb_.getJType(field_jty)->getPointerTo());
+
+  // TODO: Check is_volatile.  We need to generate an atomic store instruction
+  // when is_volatile is true.
+  irb_.CreateStore(new_value, static_field_addr, kTBAAHeapStatic, field_jty);
+
+  return;
+}
+
+llvm::Value*
+GBCExpanderPass::Expand_LoadDeclaringClassSSB(llvm::Value* method_object_addr) {
+  return irb_.LoadFromObjectOffset(method_object_addr,
+                                   art::mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
+                                   irb_.getJObjectTy(),
+                                   kTBAAConstJObject);
+}
+
+llvm::Value*
+GBCExpanderPass::Expand_LoadClassSSBFromDexCache(llvm::Value* type_idx_value) {
+  uint32_t type_idx =
+    llvm::cast<llvm::ConstantInt>(type_idx_value)->getZExtValue();
+
+  llvm::Value* storage_field_addr =
+    EmitLoadDexCacheStaticStorageFieldAddr(type_idx);
+
+  return irb_.CreateLoad(storage_field_addr, kTBAARuntimeInfo);
+}
+
+llvm::Value*
+GBCExpanderPass::Expand_GetSDCalleeMethodObjAddrFast(llvm::Value* callee_method_idx_value) {
+  uint32_t callee_method_idx =
+    llvm::cast<llvm::ConstantInt>(callee_method_idx_value)->getZExtValue();
+
+  return EmitLoadSDCalleeMethodObjectAddr(callee_method_idx);
+}
+
+llvm::Value* GBCExpanderPass::Expand_GetVirtualCalleeMethodObjAddrFast(
+    llvm::Value* vtable_idx_value,
+    llvm::Value* this_addr) {
+  int vtable_idx =
+    llvm::cast<llvm::ConstantInt>(vtable_idx_value)->getSExtValue();
+
+  return EmitLoadVirtualCalleeMethodObjectAddr(vtable_idx, this_addr);
+}
+
+llvm::Value* GBCExpanderPass::Expand_Invoke(llvm::CallInst& call_inst) {
+  // Most of this code follows MethodCompiler::EmitInsn_Invoke.
+  llvm::Value* callee_method_object_addr = call_inst.getArgOperand(0);
+  unsigned num_args = call_inst.getNumArgOperands();
+  llvm::Type* ret_type = call_inst.getType();
+
+  // Determine the function type of the callee method
+  std::vector<llvm::Type*> args_type;
+  std::vector<llvm::Value*> args;
+  for (unsigned i = 0; i < num_args; i++) {
+    args.push_back(call_inst.getArgOperand(i));
+    args_type.push_back(args[i]->getType());
+  }
+
+  llvm::FunctionType* callee_method_type =
+    llvm::FunctionType::get(ret_type, args_type, false);
+
+  llvm::Value* code_addr =
+    irb_.LoadFromObjectOffset(callee_method_object_addr,
+                              art::mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
+                              callee_method_type->getPointerTo(),
+                              kTBAARuntimeInfo);
+
+  // Invoke callee
+  llvm::Value* retval = irb_.CreateCall(code_addr, args);
+
+  return retval;
+}
+
+llvm::Value* GBCExpanderPass::Expand_DivRem(llvm::CallInst& call_inst,
+                                            bool is_div, JType op_jty) {
+  llvm::Value* dividend = call_inst.getArgOperand(0);
+  llvm::Value* divisor = call_inst.getArgOperand(1);
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  EmitGuard_DivZeroException(dex_pc, divisor, op_jty);
+  // Most of this code follows MethodCompiler::EmitIntDivRemResultComputation.
+
+  // Check the special case: MININT / -1 == MININT.  That case overflows,
+  // which is undefined behavior in LLVM.  So we test whether the divisor is
+  // -1 and, if it is, take a special path that avoids the undefined behavior.
+  llvm::Type* op_type = irb_.getJType(op_jty);
+  llvm::Value* zero = irb_.getJZero(op_jty);
+  llvm::Value* neg_one = llvm::ConstantInt::getSigned(op_type, -1);
+
+  llvm::Function* parent = irb_.GetInsertBlock()->getParent();
+  llvm::BasicBlock* eq_neg_one = llvm::BasicBlock::Create(context_, "", parent);
+  llvm::BasicBlock* ne_neg_one = llvm::BasicBlock::Create(context_, "", parent);
+  llvm::BasicBlock* neg_one_cont =
+    llvm::BasicBlock::Create(context_, "", parent);
+
+  llvm::Value* is_equal_neg_one = irb_.CreateICmpEQ(divisor, neg_one);
+  irb_.CreateCondBr(is_equal_neg_one, eq_neg_one, ne_neg_one, kUnlikely);
+
+  // If divisor == -1
+  irb_.SetInsertPoint(eq_neg_one);
+  llvm::Value* eq_result;
+  if (is_div) {
+    // We can rewrite "dividend div -1" as "neg dividend".  Two's complement
+    // representation makes the subtraction independent of signedness, and the
+    // behavior is what we want:
+    //  -(2^n)        (2^n)-1
+    //  MININT  < k <= MAXINT    ->     mul k -1  =  -k
+    //  MININT == k              ->     mul k -1  =   k
+    //
+    // LLVM represents 'neg' as a sub from zero.
+    eq_result = irb_.CreateSub(zero, dividend);
+  } else {
+    // Everything modulo -1 will be 0.
+    eq_result = zero;
+  }
+  irb_.CreateBr(neg_one_cont);
+
+  // If divisor != -1, just do the division.
+  irb_.SetInsertPoint(ne_neg_one);
+  llvm::Value* ne_result;
+  if (is_div) {
+    ne_result = irb_.CreateSDiv(dividend, divisor);
+  } else {
+    ne_result = irb_.CreateSRem(dividend, divisor);
+  }
+  irb_.CreateBr(neg_one_cont);
+
+  irb_.SetInsertPoint(neg_one_cont);
+  llvm::PHINode* result = irb_.CreatePHI(op_type, 2);
+  result->addIncoming(eq_result, eq_neg_one);
+  result->addIncoming(ne_result, ne_neg_one);
+
+  return result;
+}
+
+void GBCExpanderPass::Expand_AllocaShadowFrame(llvm::Value* num_vregs_value) {
+  // Most of this code follows MethodCompiler::EmitPrologueAllocShadowFrame
+  // and MethodCompiler::EmitPushShadowFrame.
+  uint16_t num_vregs =
+    llvm::cast<llvm::ConstantInt>(num_vregs_value)->getZExtValue();
+
+  llvm::StructType* shadow_frame_type =
+    irb_.getShadowFrameTy(num_vregs);
+
+  // Create allocas at the start of entry block.
+  llvm::IRBuilderBase::InsertPoint irb_ip_original = irb_.saveIP();
+  llvm::BasicBlock* entry_block = &func_->front();
+  irb_.SetInsertPoint(&entry_block->front());
+
+  shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
+
+  // Allocate storage for the pointer to the old shadow frame
+  old_shadow_frame_ =
+    irb_.CreateAlloca(shadow_frame_type->getElementType(0)->getPointerTo());
+
+  irb_.restoreIP(irb_ip_original);
+
+  // Push the shadow frame
+  llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+  llvm::Value* shadow_frame_upcast =
+    irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
+
+  llvm::Value* result = rtb_.EmitPushShadowFrame(shadow_frame_upcast,
+                                                 method_object_addr,
+                                                 num_vregs);
+
+  irb_.CreateStore(result, old_shadow_frame_, kTBAARegister);
+
+  return;
+}
+
+void GBCExpanderPass::Expand_SetVReg(llvm::Value* entry_idx,
+                                     llvm::Value* value) {
+  unsigned vreg_idx = LV2UInt(entry_idx);
+  DCHECK_LT(vreg_idx, dex_compilation_unit_->GetCodeItem()->registers_size_);
+
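+  // The GEP for each vreg is cached so that later stores to the same vreg
+  // reuse a single address computation.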
+  llvm::Value* vreg_addr = shadow_frame_vreg_addresses_[vreg_idx];
+  if (UNLIKELY(vreg_addr == NULL)) {
+    DCHECK(shadow_frame_ != NULL);
+
+    llvm::Value* gep_index[] = {
+      irb_.getInt32(0), // No pointer displacement
+      irb_.getInt32(1), // VRegs
+      entry_idx // VReg index
+    };
+
+    // A shadow frame address must dominate every use in the function so we
+    // place it in the entry block right after the allocas.
+    llvm::BasicBlock::iterator first_non_alloca = func_->getEntryBlock().begin();
+    while (llvm::isa<llvm::AllocaInst>(first_non_alloca)) {
+      ++first_non_alloca;
+    }
+
+    llvm::IRBuilderBase::InsertPoint ip = irb_.saveIP();
+    irb_.SetInsertPoint(static_cast<llvm::Instruction*>(first_non_alloca));
+    vreg_addr = irb_.CreateGEP(shadow_frame_, gep_index);
+    shadow_frame_vreg_addresses_[vreg_idx] = vreg_addr;
+    irb_.restoreIP(ip);
+  }
+
+  irb_.CreateStore(value,
+                   irb_.CreateBitCast(vreg_addr, value->getType()->getPointerTo()),
+                   kTBAAShadowFrame);
+  return;
+}
+
+void GBCExpanderPass::Expand_PopShadowFrame() {
+  if (old_shadow_frame_ == NULL) {
+    return;
+  }
+  rtb_.EmitPopShadowFrame(irb_.CreateLoad(old_shadow_frame_, kTBAARegister));
+  return;
+}
+
+void GBCExpanderPass::Expand_UpdateDexPC(llvm::Value* dex_pc_value) {
+  irb_.StoreToObjectOffset(shadow_frame_,
+                           art::ShadowFrame::DexPCOffset(),
+                           dex_pc_value,
+                           kTBAAShadowFrame);
+  return;
+}
+
+void GBCExpanderPass::InsertStackOverflowCheck(llvm::Function& func) {
+  // All alloca instructions are generated in the first basic block of the
+  // function, and there are no alloca instructions after the first non-alloca
+  // instruction.
+
+  llvm::BasicBlock* first_basic_block = &func.front();
+
+  // Look for first non-alloca instruction
+  llvm::BasicBlock::iterator first_non_alloca = first_basic_block->begin();
+  while (llvm::isa<llvm::AllocaInst>(first_non_alloca)) {
+    ++first_non_alloca;
+  }
+
+  irb_.SetInsertPoint(first_non_alloca);
+
+  // Insert stack overflow check codes before first_non_alloca (i.e., after all
+  // alloca instructions)
+  EmitStackOverflowCheck(&*first_non_alloca);
+
+  irb_.Runtime().EmitTestSuspend();
+
+  llvm::BasicBlock* next_basic_block = irb_.GetInsertBlock();
+  if (next_basic_block != first_basic_block) {
+    // Splice the rest of the instructions into the continuing basic block
+    next_basic_block->getInstList().splice(
+        irb_.GetInsertPoint(), first_basic_block->getInstList(),
+        first_non_alloca, first_basic_block->end());
+
+    // Rewrite the basic block
+    RewriteBasicBlock(next_basic_block);
+
+    // Update the phi-instructions in the successor basic block
+    UpdatePhiInstruction(first_basic_block, irb_.GetInsertBlock());
+  }
+
+  // We have changed the basic block
+  changed_ = true;
+}
+
+// ==== High-level intrinsic expander ==========================================
+
+llvm::Value* GBCExpanderPass::Expand_FPCompare(llvm::Value* src1_value,
+                                               llvm::Value* src2_value,
+                                               bool gt_bias) {
+  llvm::Value* cmp_eq = irb_.CreateFCmpOEQ(src1_value, src2_value);
+  llvm::Value* cmp_lt;
+
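+  // gt_bias selects cmpg semantics: an unordered (NaN) compare falls through
+  // to +1 because OLT is false for NaN; without gt_bias (cmpl), ULT is true
+  // for NaN so the result is -1.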
+  if (gt_bias) {
+    cmp_lt = irb_.CreateFCmpOLT(src1_value, src2_value);
+  } else {
+    cmp_lt = irb_.CreateFCmpULT(src1_value, src2_value);
+  }
+
+  return EmitCompareResultSelection(cmp_eq, cmp_lt);
+}
+
+llvm::Value* GBCExpanderPass::Expand_LongCompare(llvm::Value* src1_value, llvm::Value* src2_value) {
+  llvm::Value* cmp_eq = irb_.CreateICmpEQ(src1_value, src2_value);
+  llvm::Value* cmp_lt = irb_.CreateICmpSLT(src1_value, src2_value);
+
+  return EmitCompareResultSelection(cmp_eq, cmp_lt);
+}
+
+llvm::Value* GBCExpanderPass::EmitCompareResultSelection(llvm::Value* cmp_eq,
+                                                         llvm::Value* cmp_lt) {
+
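+  // Select 0 when cmp_eq holds, otherwise -1 when cmp_lt holds, and +1
+  // otherwise.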
+  llvm::Constant* zero = irb_.getJInt(0);
+  llvm::Constant* pos1 = irb_.getJInt(1);
+  llvm::Constant* neg1 = irb_.getJInt(-1);
+
+  llvm::Value* result_lt = irb_.CreateSelect(cmp_lt, neg1, pos1);
+  llvm::Value* result_eq = irb_.CreateSelect(cmp_eq, zero, result_lt);
+
+  return result_eq;
+}
+
+llvm::Value* GBCExpanderPass::Expand_IntegerShift(llvm::Value* src1_value,
+                                                  llvm::Value* src2_value,
+                                                  IntegerShiftKind kind,
+                                                  JType op_jty) {
+  DCHECK(op_jty == kInt || op_jty == kLong);
+
+  // Mask and zero-extend RHS properly
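+  // Dalvik/Java shift semantics only use the low 5 bits (int) or low 6 bits
+  // (long) of the shift distance, and LLVM shifts by amounts >= the bit width
+  // yield an undefined result, so the RHS must be masked first.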
+  if (op_jty == kInt) {
+    src2_value = irb_.CreateAnd(src2_value, 0x1f);
+  } else {
+    llvm::Value* masked_src2_value = irb_.CreateAnd(src2_value, 0x3f);
+    src2_value = irb_.CreateZExt(masked_src2_value, irb_.getJLongTy());
+  }
+
+  // Create integer shift llvm instruction
+  switch (kind) {
+  case kIntegerSHL:
+    return irb_.CreateShl(src1_value, src2_value);
+
+  case kIntegerSHR:
+    return irb_.CreateAShr(src1_value, src2_value);
+
+  case kIntegerUSHR:
+    return irb_.CreateLShr(src1_value, src2_value);
+
+  default:
+    LOG(FATAL) << "Unknown integer shift kind: " << kind;
+    return NULL;
+  }
+}
+
+llvm::Value* GBCExpanderPass::SignOrZeroExtendCat1Types(llvm::Value* value, JType jty) {
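+  // Category-1 sub-int values are kept as 32-bit ints in the expanded code:
+  // boolean/char are zero-extended, byte/short are sign-extended.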
+  switch (jty) {
+    case kBoolean:
+    case kChar:
+      return irb_.CreateZExt(value, irb_.getJType(kInt));
+    case kByte:
+    case kShort:
+      return irb_.CreateSExt(value, irb_.getJType(kInt));
+    case kVoid:
+    case kInt:
+    case kLong:
+    case kFloat:
+    case kDouble:
+    case kObject:
+      return value;  // Nothing to do.
+    default:
+      LOG(FATAL) << "Unknown java type: " << jty;
+      return NULL;
+  }
+}
+
+llvm::Value* GBCExpanderPass::TruncateCat1Types(llvm::Value* value, JType jty) {
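+  // Narrow a 32-bit value back to the declared category-1 width before it is
+  // stored to a field or array element.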
+  switch (jty) {
+    case kBoolean:
+    case kChar:
+    case kByte:
+    case kShort:
+      return irb_.CreateTrunc(value, irb_.getJType(jty));
+    case kVoid:
+    case kInt:
+    case kLong:
+    case kFloat:
+    case kDouble:
+    case kObject:
+      return value;  // Nothing to do.
+    default:
+      LOG(FATAL) << "Unknown java type: " << jty;
+      return NULL;
+  }
+}
+
+llvm::Value* GBCExpanderPass::Expand_HLArrayGet(llvm::CallInst& call_inst,
+                                                JType elem_jty) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  llvm::Value* array_addr = call_inst.getArgOperand(1);
+  llvm::Value* index_value = call_inst.getArgOperand(2);
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, array_addr, opt_flags);
+  EmitGuard_ArrayIndexOutOfBoundsException(dex_pc, array_addr, index_value,
+                                           opt_flags);
+
+  llvm::Value* array_elem_addr = EmitArrayGEP(array_addr, index_value, elem_jty);
+
+  llvm::Value* array_elem_value = irb_.CreateLoad(array_elem_addr, kTBAAHeapArray, elem_jty);
+
+  return SignOrZeroExtendCat1Types(array_elem_value, elem_jty);
+}
+
+
+void GBCExpanderPass::Expand_HLArrayPut(llvm::CallInst& call_inst,
+                                        JType elem_jty) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  llvm::Value* new_value = call_inst.getArgOperand(1);
+  llvm::Value* array_addr = call_inst.getArgOperand(2);
+  llvm::Value* index_value = call_inst.getArgOperand(3);
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, array_addr, opt_flags);
+  EmitGuard_ArrayIndexOutOfBoundsException(dex_pc, array_addr, index_value,
+                                           opt_flags);
+
+  new_value = TruncateCat1Types(new_value, elem_jty);
+
+  llvm::Value* array_elem_addr = EmitArrayGEP(array_addr, index_value, elem_jty);
+
+  if (elem_jty == kObject) { // If put an object, check the type, and mark GC card table.
+    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::CheckPutArrayElement);
+
+    irb_.CreateCall2(runtime_func, new_value, array_addr);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+    EmitMarkGCCard(new_value, array_addr);
+  }
+
+  irb_.CreateStore(new_value, array_elem_addr, kTBAAHeapArray, elem_jty);
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
+                                            JType field_jty) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  llvm::Value* object_addr = call_inst.getArgOperand(1);
+  uint32_t field_idx = LV2UInt(call_inst.getArgOperand(2));
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
+
+  llvm::Value* field_value;
+
+  int field_offset;
+  bool is_volatile;
+  bool is_fast_path = driver_->ComputeInstanceFieldInfo(
+    field_idx, dex_compilation_unit_, field_offset, is_volatile, false);
+
+  if (!is_fast_path) {
+    llvm::Function* runtime_func;
+
+    if (field_jty == kObject) {
+      runtime_func = irb_.GetRuntime(runtime_support::GetObjectInstance);
+    } else if (field_jty == kLong || field_jty == kDouble) {
+      runtime_func = irb_.GetRuntime(runtime_support::Get64Instance);
+    } else {
+      runtime_func = irb_.GetRuntime(runtime_support::Get32Instance);
+    }
+
+    llvm::ConstantInt* field_idx_value = irb_.getInt32(field_idx);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    EmitUpdateDexPC(dex_pc);
+
+    field_value = irb_.CreateCall3(runtime_func, field_idx_value,
+                                   method_object_addr, object_addr);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+    if (field_jty == kFloat || field_jty == kDouble) {
+      field_value = irb_.CreateBitCast(field_value, irb_.getJType(field_jty));
+    }
+  } else {
+    DCHECK_GE(field_offset, 0);
+
+    llvm::PointerType* field_type =
+      irb_.getJType(field_jty)->getPointerTo();
+
+    llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+    llvm::Value* field_addr =
+      irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
+
+    field_value = irb_.CreateLoad(field_addr, kTBAAHeapInstance, field_jty);
+    field_value = SignOrZeroExtendCat1Types(field_value, field_jty);
+
+    if (is_volatile) {
+      irb_.CreateMemoryBarrier(art::kLoadLoad);
+    }
+  }
+
+  return field_value;
+}
+
+void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
+                                    JType field_jty) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  llvm::Value* new_value = call_inst.getArgOperand(1);
+  llvm::Value* object_addr = call_inst.getArgOperand(2);
+  uint32_t field_idx = LV2UInt(call_inst.getArgOperand(3));
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
+
+  int field_offset;
+  bool is_volatile;
+  bool is_fast_path = driver_->ComputeInstanceFieldInfo(
+    field_idx, dex_compilation_unit_, field_offset, is_volatile, true);
+
+  if (!is_fast_path) {
+    llvm::Function* runtime_func;
+
+    if (field_jty == kFloat) {
+      new_value = irb_.CreateBitCast(new_value, irb_.getJType(kInt));
+    } else if (field_jty == kDouble) {
+      new_value = irb_.CreateBitCast(new_value, irb_.getJType(kLong));
+    }
+
+    if (field_jty == kObject) {
+      runtime_func = irb_.GetRuntime(runtime_support::SetObjectInstance);
+    } else if (field_jty == kLong || field_jty == kDouble) {
+      runtime_func = irb_.GetRuntime(runtime_support::Set64Instance);
+    } else {
+      runtime_func = irb_.GetRuntime(runtime_support::Set32Instance);
+    }
+
+    llvm::Value* field_idx_value = irb_.getInt32(field_idx);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    EmitUpdateDexPC(dex_pc);
+
+    irb_.CreateCall4(runtime_func, field_idx_value,
+                     method_object_addr, object_addr, new_value);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+  } else {
+    DCHECK_GE(field_offset, 0);
+
+    if (is_volatile) {
+      irb_.CreateMemoryBarrier(art::kStoreStore);
+    }
+
+    llvm::PointerType* field_type =
+      irb_.getJType(field_jty)->getPointerTo();
+
+    llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+    llvm::Value* field_addr =
+      irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
+
+    new_value = TruncateCat1Types(new_value, field_jty);
+    irb_.CreateStore(new_value, field_addr, kTBAAHeapInstance, field_jty);
+
+    if (is_volatile) {
+      irb_.CreateMemoryBarrier(art::kLoadLoad);
+    }
+
+    if (field_jty == kObject) { // If put an object, mark the GC card table.
+      EmitMarkGCCard(new_value, object_addr);
+    }
+  }
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::EmitLoadConstantClass(uint32_t dex_pc,
+                                                    uint32_t type_idx) {
+  if (!driver_->CanAccessTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
+                                           *dex_compilation_unit_->GetDexFile(), type_idx)) {
+    llvm::Value* type_idx_value = irb_.getInt32(type_idx);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
+
+    llvm::Function* runtime_func =
+      irb_.GetRuntime(runtime_support::InitializeTypeAndVerifyAccess);
+
+    EmitUpdateDexPC(dex_pc);
+
+    llvm::Value* type_object_addr =
+      irb_.CreateCall3(runtime_func, type_idx_value, method_object_addr, thread_object_addr);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+    return type_object_addr;
+
+  } else {
+    // Try to load the class (type) object from the dex cache.
+    llvm::Value* type_field_addr =
+      EmitLoadDexCacheResolvedTypeFieldAddr(type_idx);
+
+    llvm::Value* type_object_addr = irb_.CreateLoad(type_field_addr, kTBAARuntimeInfo);
+
+    if (driver_->CanAssumeTypeIsPresentInDexCache(*dex_compilation_unit_->GetDexFile(), type_idx)) {
+      return type_object_addr;
+    }
+
+    llvm::BasicBlock* block_original = irb_.GetInsertBlock();
+
+    // Test whether class (type) object is in the dex cache or not
+    llvm::Value* equal_null =
+      irb_.CreateICmpEQ(type_object_addr, irb_.getJNull());
+
+    llvm::BasicBlock* block_cont =
+      CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+    llvm::BasicBlock* block_load_class =
+      CreateBasicBlockWithDexPC(dex_pc, "load_class");
+
+    irb_.CreateCondBr(equal_null, block_load_class, block_cont, kUnlikely);
+
+    // Fallback routine to load the class object
+    irb_.SetInsertPoint(block_load_class);
+
+    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::InitializeType);
+
+    llvm::Constant* type_idx_value = irb_.getInt32(type_idx);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
+
+    EmitUpdateDexPC(dex_pc);
+
+    llvm::Value* loaded_type_object_addr =
+      irb_.CreateCall3(runtime_func, type_idx_value, method_object_addr, thread_object_addr);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+    llvm::BasicBlock* block_after_load_class = irb_.GetInsertBlock();
+
+    irb_.CreateBr(block_cont);
+
+    // Now the class object must be loaded
+    irb_.SetInsertPoint(block_cont);
+
+    llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
+
+    phi->addIncoming(type_object_addr, block_original);
+    phi->addIncoming(loaded_type_object_addr, block_after_load_class);
+
+    return phi;
+  }
+}
+
+llvm::Value* GBCExpanderPass::EmitLoadStaticStorage(uint32_t dex_pc,
+                                                    uint32_t type_idx) {
+  llvm::BasicBlock* block_load_static =
+    CreateBasicBlockWithDexPC(dex_pc, "load_static");
+
+  llvm::BasicBlock* block_cont = CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+  // Load static storage from dex cache
+  llvm::Value* storage_field_addr =
+    EmitLoadDexCacheStaticStorageFieldAddr(type_idx);
+
+  llvm::Value* storage_object_addr = irb_.CreateLoad(storage_field_addr, kTBAARuntimeInfo);
+
+  llvm::BasicBlock* block_original = irb_.GetInsertBlock();
+
+  // Test: Is the static storage of this class initialized?
+  llvm::Value* equal_null =
+    irb_.CreateICmpEQ(storage_object_addr, irb_.getJNull());
+
+  irb_.CreateCondBr(equal_null, block_load_static, block_cont, kUnlikely);
+
+  // Fallback routine to load the class object
+  irb_.SetInsertPoint(block_load_static);
+
+  llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::InitializeStaticStorage);
+
+  llvm::Constant* type_idx_value = irb_.getInt32(type_idx);
+
+  llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+  llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
+
+  EmitUpdateDexPC(dex_pc);
+
+  llvm::Value* loaded_storage_object_addr =
+    irb_.CreateCall3(runtime_func, type_idx_value, method_object_addr, thread_object_addr);
+
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  llvm::BasicBlock* block_after_load_static = irb_.GetInsertBlock();
+
+  irb_.CreateBr(block_cont);
+
+  // Now the class object must be loaded
+  irb_.SetInsertPoint(block_cont);
+
+  llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
+
+  phi->addIncoming(storage_object_addr, block_original);
+  phi->addIncoming(loaded_storage_object_addr, block_after_load_static);
+
+  return phi;
+}
+
+llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
+                                            JType field_jty) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
+
+  int field_offset;
+  int ssb_index;
+  bool is_referrers_class;
+  bool is_volatile;
+
+  bool is_fast_path = driver_->ComputeStaticFieldInfo(
+    field_idx, dex_compilation_unit_, field_offset, ssb_index,
+    is_referrers_class, is_volatile, false);
+
+  llvm::Value* static_field_value;
+
+  if (!is_fast_path) {
+    llvm::Function* runtime_func;
+
+    if (field_jty == kObject) {
+      runtime_func = irb_.GetRuntime(runtime_support::GetObjectStatic);
+    } else if (field_jty == kLong || field_jty == kDouble) {
+      runtime_func = irb_.GetRuntime(runtime_support::Get64Static);
+    } else {
+      runtime_func = irb_.GetRuntime(runtime_support::Get32Static);
+    }
+
+    llvm::Constant* field_idx_value = irb_.getInt32(field_idx);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    EmitUpdateDexPC(dex_pc);
+
+    static_field_value =
+      irb_.CreateCall2(runtime_func, field_idx_value, method_object_addr);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+    if (field_jty == kFloat || field_jty == kDouble) {
+      static_field_value = irb_.CreateBitCast(static_field_value, irb_.getJType(field_jty));
+    }
+  } else {
+    DCHECK_GE(field_offset, 0);
+
+    llvm::Value* static_storage_addr = NULL;
+
+    if (is_referrers_class) {
+      // Fast path, static storage base is this method's class
+      llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+      static_storage_addr =
+        irb_.LoadFromObjectOffset(method_object_addr,
+                                  art::mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
+                                  irb_.getJObjectTy(),
+                                  kTBAAConstJObject);
+    } else {
+      // Medium path: the static storage base is in a different class, which
+      // requires a check that the other class is initialized
+      DCHECK_GE(ssb_index, 0);
+      static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
+    }
+
+    llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+    llvm::Value* static_field_addr =
+      irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
+                         irb_.getJType(field_jty)->getPointerTo());
+
+    static_field_value = irb_.CreateLoad(static_field_addr, kTBAAHeapStatic, field_jty);
+    static_field_value = SignOrZeroExtendCat1Types(static_field_value, field_jty);
+
+    if (is_volatile) {
+      irb_.CreateMemoryBarrier(art::kLoadLoad);
+    }
+  }
+
+  return static_field_value;
+}
+
+void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
+                                    JType field_jty) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
+  llvm::Value* new_value = call_inst.getArgOperand(1);
+
+  if (field_jty == kFloat || field_jty == kDouble) {
+    new_value = irb_.CreateBitCast(new_value, irb_.getJType(field_jty));
+  }
+
+  int field_offset;
+  int ssb_index;
+  bool is_referrers_class;
+  bool is_volatile;
+
+  bool is_fast_path = driver_->ComputeStaticFieldInfo(
+    field_idx, dex_compilation_unit_, field_offset, ssb_index,
+    is_referrers_class, is_volatile, true);
+
+  if (!is_fast_path) {
+    llvm::Function* runtime_func;
+
+    if (field_jty == kObject) {
+      runtime_func = irb_.GetRuntime(runtime_support::SetObjectStatic);
+    } else if (field_jty == kLong || field_jty == kDouble) {
+      runtime_func = irb_.GetRuntime(runtime_support::Set64Static);
+    } else {
+      runtime_func = irb_.GetRuntime(runtime_support::Set32Static);
+    }
+
+    if (field_jty == kFloat) {
+      new_value = irb_.CreateBitCast(new_value, irb_.getJType(kInt));
+    } else if (field_jty == kDouble) {
+      new_value = irb_.CreateBitCast(new_value, irb_.getJType(kLong));
+    }
+
+    llvm::Constant* field_idx_value = irb_.getInt32(field_idx);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    EmitUpdateDexPC(dex_pc);
+
+    irb_.CreateCall3(runtime_func, field_idx_value,
+                     method_object_addr, new_value);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+  } else {
+    DCHECK_GE(field_offset, 0);
+
+    llvm::Value* static_storage_addr = NULL;
+
+    if (is_referrers_class) {
+      // Fast path, static storage base is this method's class
+      llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+      static_storage_addr =
+        irb_.LoadFromObjectOffset(method_object_addr,
+                                  art::mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
+                                  irb_.getJObjectTy(),
+                                  kTBAAConstJObject);
+    } else {
+      // Medium path: the static storage base is in a different class, which
+      // requires a check that the other class is initialized
+      DCHECK_GE(ssb_index, 0);
+      static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
+    }
+
+    if (is_volatile) {
+      irb_.CreateMemoryBarrier(art::kStoreStore);
+    }
+
+    llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+
+    llvm::Value* static_field_addr =
+      irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
+                         irb_.getJType(field_jty)->getPointerTo());
+
+    new_value = TruncateCat1Types(new_value, field_jty);
+    irb_.CreateStore(new_value, static_field_addr, kTBAAHeapStatic, field_jty);
+
+    if (is_volatile) {
+      irb_.CreateMemoryBarrier(art::kStoreLoad);
+    }
+
+    if (field_jty == kObject) { // If put an object, mark the GC card table.
+      EmitMarkGCCard(new_value, static_storage_addr);
+    }
+  }
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::Expand_ConstString(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t string_idx = LV2UInt(call_inst.getArgOperand(0));
+
+  llvm::Value* string_field_addr = EmitLoadDexCacheStringFieldAddr(string_idx);
+
+  llvm::Value* string_addr = irb_.CreateLoad(string_field_addr, kTBAARuntimeInfo);
+
+  if (!driver_->CanAssumeStringIsPresentInDexCache(*dex_compilation_unit_->GetDexFile(),
+                                                   string_idx)) {
+    llvm::BasicBlock* block_str_exist =
+      CreateBasicBlockWithDexPC(dex_pc, "str_exist");
+
+    llvm::BasicBlock* block_str_resolve =
+      CreateBasicBlockWithDexPC(dex_pc, "str_resolve");
+
+    llvm::BasicBlock* block_cont =
+      CreateBasicBlockWithDexPC(dex_pc, "str_cont");
+
+    // Test: Is the string resolved and in the dex cache?
+    llvm::Value* equal_null = irb_.CreateICmpEQ(string_addr, irb_.getJNull());
+
+    irb_.CreateCondBr(equal_null, block_str_resolve, block_str_exist, kUnlikely);
+
+    // String is resolved, go to next basic block.
+    irb_.SetInsertPoint(block_str_exist);
+    irb_.CreateBr(block_cont);
+
+    // String is not resolved yet, resolve it now.
+    irb_.SetInsertPoint(block_str_resolve);
+
+    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::ResolveString);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    llvm::Value* string_idx_value = irb_.getInt32(string_idx);
+
+    EmitUpdateDexPC(dex_pc);
+
+    llvm::Value* result = irb_.CreateCall2(runtime_func, method_object_addr,
+                                           string_idx_value);
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+
+    irb_.CreateBr(block_cont);
+
+
+    llvm::BasicBlock* block_pre_cont = irb_.GetInsertBlock();
+
+    irb_.SetInsertPoint(block_cont);
+
+    llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
+
+    phi->addIncoming(string_addr, block_str_exist);
+    phi->addIncoming(result, block_pre_cont);
+
+    string_addr = phi;
+  }
+
+  return string_addr;
+}
+
+llvm::Value* GBCExpanderPass::Expand_ConstClass(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
+
+  llvm::Value* type_object_addr = EmitLoadConstantClass(dex_pc, type_idx);
+
+  return type_object_addr;
+}
+
+void GBCExpanderPass::Expand_MonitorEnter(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  llvm::Value* object_addr = call_inst.getArgOperand(1);
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
+
+  EmitUpdateDexPC(dex_pc);
+
+  irb_.Runtime().EmitLockObject(object_addr);
+
+  return;
+}
+
+void GBCExpanderPass::Expand_MonitorExit(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  llvm::Value* object_addr = call_inst.getArgOperand(1);
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
+
+  EmitUpdateDexPC(dex_pc);
+
+  irb_.Runtime().EmitUnlockObject(object_addr);
+
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  return;
+}
+
+void GBCExpanderPass::Expand_HLCheckCast(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
+  llvm::Value* object_addr = call_inst.getArgOperand(1);
+
+  llvm::BasicBlock* block_test_class =
+    CreateBasicBlockWithDexPC(dex_pc, "test_class");
+
+  llvm::BasicBlock* block_test_sub_class =
+    CreateBasicBlockWithDexPC(dex_pc, "test_sub_class");
+
+  llvm::BasicBlock* block_cont =
+    CreateBasicBlockWithDexPC(dex_pc, "checkcast_cont");
+
+  // Test: Is the reference equal to null?  Act as no-op when it is null.
+  llvm::Value* equal_null = irb_.CreateICmpEQ(object_addr, irb_.getJNull());
+
+  irb_.CreateCondBr(equal_null, block_cont, block_test_class, kUnlikely);
+
+  // Test: Is the object instantiated from the given class?
+  irb_.SetInsertPoint(block_test_class);
+  llvm::Value* type_object_addr = EmitLoadConstantClass(dex_pc, type_idx);
+  DCHECK_EQ(art::mirror::Object::ClassOffset().Int32Value(), 0);
+
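+  // Object's class pointer is at offset 0 (checked above), so reinterpreting
+  // the object pointer and loading from it yields the object's Class.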
+  llvm::PointerType* jobject_ptr_ty = irb_.getJObjectTy();
+
+  llvm::Value* object_type_field_addr =
+    irb_.CreateBitCast(object_addr, jobject_ptr_ty->getPointerTo());
+
+  llvm::Value* object_type_object_addr =
+    irb_.CreateLoad(object_type_field_addr, kTBAAConstJObject);
+
+  llvm::Value* equal_class =
+    irb_.CreateICmpEQ(type_object_addr, object_type_object_addr);
+
+  irb_.CreateCondBr(equal_class, block_cont, block_test_sub_class, kLikely);
+
+  // Test: Is the object instantiated from the subclass of the given class?
+  irb_.SetInsertPoint(block_test_sub_class);
+
+  EmitUpdateDexPC(dex_pc);
+
+  irb_.CreateCall2(irb_.GetRuntime(runtime_support::CheckCast),
+                   type_object_addr, object_type_object_addr);
+
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  irb_.CreateBr(block_cont);
+
+  irb_.SetInsertPoint(block_cont);
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::Expand_InstanceOf(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
+  llvm::Value* object_addr = call_inst.getArgOperand(1);
+
+  llvm::BasicBlock* block_nullp =
+      CreateBasicBlockWithDexPC(dex_pc, "nullp");
+
+  llvm::BasicBlock* block_test_class =
+      CreateBasicBlockWithDexPC(dex_pc, "test_class");
+
+  llvm::BasicBlock* block_class_equals =
+      CreateBasicBlockWithDexPC(dex_pc, "class_eq");
+
+  llvm::BasicBlock* block_test_sub_class =
+      CreateBasicBlockWithDexPC(dex_pc, "test_sub_class");
+
+  llvm::BasicBlock* block_cont =
+      CreateBasicBlockWithDexPC(dex_pc, "instance_of_cont");
+
+  // Overview of the following code:
+  // Check for null; if the reference is null the result is false.  Otherwise
+  // compare the object's class with the target class; if they are equal the
+  // result is true.  Otherwise call out to the slow path (IsAssignable).
+  //
+  // Test: Is the reference equal to null?  Set 0 when it is null.
+  llvm::Value* equal_null = irb_.CreateICmpEQ(object_addr, irb_.getJNull());
+
+  irb_.CreateCondBr(equal_null, block_nullp, block_test_class, kUnlikely);
+
+  irb_.SetInsertPoint(block_nullp);
+  irb_.CreateBr(block_cont);
+
+  // Test: Is the object instantiated from the given class?
+  irb_.SetInsertPoint(block_test_class);
+  llvm::Value* type_object_addr = EmitLoadConstantClass(dex_pc, type_idx);
+  DCHECK_EQ(art::mirror::Object::ClassOffset().Int32Value(), 0);
+
+  llvm::PointerType* jobject_ptr_ty = irb_.getJObjectTy();
+
+  llvm::Value* object_type_field_addr =
+    irb_.CreateBitCast(object_addr, jobject_ptr_ty->getPointerTo());
+
+  llvm::Value* object_type_object_addr =
+    irb_.CreateLoad(object_type_field_addr, kTBAAConstJObject);
+
+  llvm::Value* equal_class =
+    irb_.CreateICmpEQ(type_object_addr, object_type_object_addr);
+
+  irb_.CreateCondBr(equal_class, block_class_equals, block_test_sub_class, kLikely);
+
+  irb_.SetInsertPoint(block_class_equals);
+  irb_.CreateBr(block_cont);
+
+  // Test: Is the object instantiated from the subclass of the given class?
+  irb_.SetInsertPoint(block_test_sub_class);
+  llvm::Value* result =
+    irb_.CreateCall2(irb_.GetRuntime(runtime_support::IsAssignable),
+                     type_object_addr, object_type_object_addr);
+  irb_.CreateBr(block_cont);
+
+  irb_.SetInsertPoint(block_cont);
+
+  llvm::PHINode* phi = irb_.CreatePHI(irb_.getJIntTy(), 3);
+
+  phi->addIncoming(irb_.getJInt(0), block_nullp);
+  phi->addIncoming(irb_.getJInt(1), block_class_equals);
+  phi->addIncoming(result, block_test_sub_class);
+
+  return phi;
+}
+
+llvm::Value* GBCExpanderPass::Expand_NewInstance(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
+
+  llvm::Function* runtime_func;
+  if (driver_->CanAccessInstantiableTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
+                                                      *dex_compilation_unit_->GetDexFile(),
+                                                      type_idx)) {
+    runtime_func = irb_.GetRuntime(runtime_support::AllocObject);
+  } else {
+    runtime_func = irb_.GetRuntime(runtime_support::AllocObjectWithAccessCheck);
+  }
+
+  llvm::Constant* type_index_value = irb_.getInt32(type_idx);
+
+  llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+  llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
+
+  EmitUpdateDexPC(dex_pc);
+
+  llvm::Value* object_addr =
+    irb_.CreateCall3(runtime_func, type_index_value, method_object_addr, thread_object_addr);
+
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  return object_addr;
+}
+
+llvm::Value* GBCExpanderPass::Expand_HLInvoke(llvm::CallInst& call_inst) {
+  art::InvokeType invoke_type = static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
+  bool is_static = (invoke_type == art::kStatic);
+
+  if (!is_static) {
+    // Test: Is *this* parameter equal to null?
+    uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+    llvm::Value* this_addr = call_inst.getArgOperand(3);
+    int opt_flags = LV2UInt(call_inst.getArgOperand(2));
+
+    EmitGuard_NullPointerException(dex_pc, this_addr, opt_flags);
+  }
+
+  llvm::Value* result = NULL;
+  if (EmitIntrinsic(call_inst, &result)) {
+    return result;
+  }
+
+  return EmitInvoke(call_inst);
+}
+
+llvm::Value* GBCExpanderPass::Expand_OptArrayLength(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  // Get the array object address
+  llvm::Value* array_addr = call_inst.getArgOperand(1);
+  int opt_flags = LV2UInt(call_inst.getArgOperand(0));
+
+  EmitGuard_NullPointerException(dex_pc, array_addr, opt_flags);
+
+  // Get the array length and return it
+  return EmitLoadArrayLength(array_addr);
+}
+
+llvm::Value* GBCExpanderPass::Expand_NewArray(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
+  llvm::Value* length = call_inst.getArgOperand(1);
+
+  return EmitAllocNewArray(dex_pc, length, type_idx, false);
+}
+
+llvm::Value* GBCExpanderPass::Expand_HLFilledNewArray(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+  uint32_t type_idx = LV2UInt(call_inst.getArgOperand(1));
+  uint32_t length = call_inst.getNumArgOperands() - 3;
+
+  llvm::Value* object_addr =
+    EmitAllocNewArray(dex_pc, irb_.getInt32(length), type_idx, true);
+
+  if (length > 0) {
+    // Check for the element type
+    uint32_t type_desc_len = 0;
+    const char* type_desc =
+        dex_compilation_unit_->GetDexFile()->StringByTypeIdx(type_idx, &type_desc_len);
+
+    DCHECK_GE(type_desc_len, 2u); // should be guaranteed by verifier
+    DCHECK_EQ(type_desc[0], '['); // should be guaranteed by verifier
+    bool is_elem_int_ty = (type_desc[1] == 'I');
+
+    uint32_t alignment;
+    llvm::Constant* elem_size;
+    llvm::PointerType* field_type;
+
+    // NOTE: Currently filled-new-array only supports 'L', '[', and 'I' as the
+    // element type, so we only need to check two cases: primitive int and
+    // non-primitive types.
+    if (is_elem_int_ty) {
+      alignment = sizeof(int32_t);
+      elem_size = irb_.getPtrEquivInt(sizeof(int32_t));
+      field_type = irb_.getJIntTy()->getPointerTo();
+    } else {
+      alignment = irb_.getSizeOfPtrEquivInt();
+      elem_size = irb_.getSizeOfPtrEquivIntValue();
+      field_type = irb_.getJObjectTy()->getPointerTo();
+    }
+
+    llvm::Value* data_field_offset =
+      irb_.getPtrEquivInt(art::mirror::Array::DataOffset(alignment).Int32Value());
+
+    llvm::Value* data_field_addr =
+      irb_.CreatePtrDisp(object_addr, data_field_offset, field_type);
+
+    // TODO: Tune this code.  Currently we generate one store instruction per
+    // element, which may be very space consuming.  Switching to memcpy might
+    // help; however, since we can't guarantee that the allocas for the Dalvik
+    // registers are contiguous, we can't perform that optimization yet.
+    for (uint32_t i = 0; i < length; ++i) {
+      llvm::Value* reg_value = call_inst.getArgOperand(i+3);
+
+      irb_.CreateStore(reg_value, data_field_addr, kTBAAHeapArray);
+
+      data_field_addr =
+        irb_.CreatePtrDisp(data_field_addr, elem_size, field_type);
+    }
+  }
+
+  return object_addr;
+}
+
+void GBCExpanderPass::Expand_HLFillArrayData(llvm::CallInst& call_inst) {
+  uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
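+  // The payload offset operand is relative to the fill-array-data
+  // instruction, so add the current dex pc to get the offset into the
+  // method's code.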
+  int32_t payload_offset = static_cast<int32_t>(dex_pc) +
+                           LV2SInt(call_inst.getArgOperand(0));
+  llvm::Value* array_addr = call_inst.getArgOperand(1);
+
+  const art::Instruction::ArrayDataPayload* payload =
+    reinterpret_cast<const art::Instruction::ArrayDataPayload*>(
+        dex_compilation_unit_->GetCodeItem()->insns_ + payload_offset);
+
+  if (payload->element_count == 0) {
+    // When the payload contains zero elements there is nothing to copy, but
+    // we still have to check that the array object address is not null.
+    EmitGuard_NullPointerException(dex_pc, array_addr, 0);
+  } else {
+    // To save code size, call the runtime function to copy the payload
+    // contents from the DexFile.
+
+    // NOTE: We will check for the NullPointerException in the runtime.
+
+    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::FillArrayData);
+
+    llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+    EmitUpdateDexPC(dex_pc);
+
+    irb_.CreateCall4(runtime_func,
+                     method_object_addr, irb_.getInt32(dex_pc),
+                     array_addr, irb_.getInt32(payload_offset));
+
+    EmitGuard_ExceptionLandingPad(dex_pc);
+  }
+
+  return;
+}
+
+llvm::Value* GBCExpanderPass::EmitAllocNewArray(uint32_t dex_pc,
+                                                llvm::Value* array_length_value,
+                                                uint32_t type_idx,
+                                                bool is_filled_new_array) {
+  llvm::Function* runtime_func;
+
+  bool skip_access_check =
+    driver_->CanAccessTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
+                                        *dex_compilation_unit_->GetDexFile(), type_idx);
+
+
+  if (is_filled_new_array) {
+    runtime_func = skip_access_check ?
+      irb_.GetRuntime(runtime_support::CheckAndAllocArray) :
+      irb_.GetRuntime(runtime_support::CheckAndAllocArrayWithAccessCheck);
+  } else {
+    runtime_func = skip_access_check ?
+      irb_.GetRuntime(runtime_support::AllocArray) :
+      irb_.GetRuntime(runtime_support::AllocArrayWithAccessCheck);
+  }
+
+  llvm::Constant* type_index_value = irb_.getInt32(type_idx);
+
+  llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+  llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
+
+  EmitUpdateDexPC(dex_pc);
+
+  llvm::Value* object_addr =
+    irb_.CreateCall4(runtime_func, type_index_value, method_object_addr,
+                     array_length_value, thread_object_addr);
+
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  return object_addr;
+}
+
+llvm::Value* GBCExpanderPass::
+EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx,
+                                         art::InvokeType invoke_type,
+                                         llvm::Value* this_addr,
+                                         uint32_t dex_pc,
+                                         bool is_fast_path) {
+
+  llvm::Function* runtime_func = NULL;
+
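+  // Select the method resolution entrypoint for this invoke type; only the
+  // interface case has a fast path that skips the access check.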
+  switch (invoke_type) {
+  case art::kStatic:
+    runtime_func = irb_.GetRuntime(runtime_support::FindStaticMethodWithAccessCheck);
+    break;
+
+  case art::kDirect:
+    runtime_func = irb_.GetRuntime(runtime_support::FindDirectMethodWithAccessCheck);
+    break;
+
+  case art::kVirtual:
+    runtime_func = irb_.GetRuntime(runtime_support::FindVirtualMethodWithAccessCheck);
+    break;
+
+  case art::kSuper:
+    runtime_func = irb_.GetRuntime(runtime_support::FindSuperMethodWithAccessCheck);
+    break;
+
+  case art::kInterface:
+    if (is_fast_path) {
+      runtime_func = irb_.GetRuntime(runtime_support::FindInterfaceMethod);
+    } else {
+      runtime_func = irb_.GetRuntime(runtime_support::FindInterfaceMethodWithAccessCheck);
+    }
+    break;
+  }
+
+  llvm::Value* callee_method_idx_value = irb_.getInt32(callee_method_idx);
+
+  if (this_addr == NULL) {
+    DCHECK_EQ(invoke_type, art::kStatic);
+    this_addr = irb_.getJNull();
+  }
+
+  llvm::Value* caller_method_object_addr = EmitLoadMethodObjectAddr();
+
+  llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
+
+  EmitUpdateDexPC(dex_pc);
+
+  llvm::Value* callee_method_object_addr =
+    irb_.CreateCall4(runtime_func,
+                     callee_method_idx_value,
+                     this_addr,
+                     caller_method_object_addr,
+                     thread_object_addr);
+
+  EmitGuard_ExceptionLandingPad(dex_pc);
+
+  return callee_method_object_addr;
+}
+
+void GBCExpanderPass::EmitMarkGCCard(llvm::Value* value, llvm::Value* target_addr) {
+  // Use the runtime support builder so that the target can override this with
+  // inline assembly.
+  irb_.Runtime().EmitMarkGCCard(value, target_addr);
+}
+
+void GBCExpanderPass::EmitUpdateDexPC(uint32_t dex_pc) {
+  if (shadow_frame_ == NULL) {
+    return;
+  }
+  irb_.StoreToObjectOffset(shadow_frame_,
+                           art::ShadowFrame::DexPCOffset(),
+                           irb_.getInt32(dex_pc),
+                           kTBAAShadowFrame);
+}
+
+void GBCExpanderPass::EmitGuard_DivZeroException(uint32_t dex_pc,
+                                                 llvm::Value* denominator,
+                                                 JType op_jty) {
+  DCHECK(op_jty == kInt || op_jty == kLong) << op_jty;
+
+  llvm::Constant* zero = irb_.getJZero(op_jty);
+
+  llvm::Value* equal_zero = irb_.CreateICmpEQ(denominator, zero);
+
+  llvm::BasicBlock* block_exception = CreateBasicBlockWithDexPC(dex_pc, "div0");
+
+  llvm::BasicBlock* block_continue = CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+  irb_.CreateCondBr(equal_zero, block_exception, block_continue, kUnlikely);
+
+  irb_.SetInsertPoint(block_exception);
+  EmitUpdateDexPC(dex_pc);
+  irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowDivZeroException));
+  EmitBranchExceptionLandingPad(dex_pc);
+
+  irb_.SetInsertPoint(block_continue);
+}
+
+void GBCExpanderPass::EmitGuard_NullPointerException(uint32_t dex_pc,
+                                                     llvm::Value* object,
+                                                     int opt_flags) {
+  bool ignore_null_check = ((opt_flags & MIR_IGNORE_NULL_CHECK) != 0);
+  if (ignore_null_check) {
+    llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc);
+    if (lpad) {
+      // There is at least one catch: create a "fake" conditional branch to
+      // keep the exception edge to the catch block.
+      landing_pad_phi_mapping_[lpad].push_back(
+          std::make_pair(current_bb_->getUniquePredecessor(),
+                         irb_.GetInsertBlock()));
+
+      llvm::BasicBlock* block_continue =
+          CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+      irb_.CreateCondBr(irb_.getFalse(), lpad, block_continue, kUnlikely);
+
+      irb_.SetInsertPoint(block_continue);
+    }
+  } else {
+    llvm::Value* equal_null = irb_.CreateICmpEQ(object, irb_.getJNull());
+
+    llvm::BasicBlock* block_exception =
+        CreateBasicBlockWithDexPC(dex_pc, "nullp");
+
+    llvm::BasicBlock* block_continue =
+        CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+    irb_.CreateCondBr(equal_null, block_exception, block_continue, kUnlikely);
+
+    irb_.SetInsertPoint(block_exception);
+    EmitUpdateDexPC(dex_pc);
+    irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowNullPointerException),
+                    irb_.getInt32(dex_pc));
+    EmitBranchExceptionLandingPad(dex_pc);
+
+    irb_.SetInsertPoint(block_continue);
+  }
+}
+
+void
+GBCExpanderPass::EmitGuard_ArrayIndexOutOfBoundsException(uint32_t dex_pc,
+                                                          llvm::Value* array,
+                                                          llvm::Value* index,
+                                                          int opt_flags) {
+  bool ignore_range_check = ((opt_flags & MIR_IGNORE_RANGE_CHECK) != 0);
+  if (ignore_range_check) {
+    llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc);
+    if (lpad) {
+      // There is at least one catch: create a "fake" conditional branch to
+      // keep the exception edge to the catch block.
+      landing_pad_phi_mapping_[lpad].push_back(
+          std::make_pair(current_bb_->getUniquePredecessor(),
+                         irb_.GetInsertBlock()));
+
+      llvm::BasicBlock* block_continue =
+          CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+      irb_.CreateCondBr(irb_.getFalse(), lpad, block_continue, kUnlikely);
+
+      irb_.SetInsertPoint(block_continue);
+    }
+  } else {
+    llvm::Value* array_len = EmitLoadArrayLength(array);
+
+    llvm::Value* cmp = irb_.CreateICmpUGE(index, array_len);
+
+    llvm::BasicBlock* block_exception =
+        CreateBasicBlockWithDexPC(dex_pc, "overflow");
+
+    llvm::BasicBlock* block_continue =
+        CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+    irb_.CreateCondBr(cmp, block_exception, block_continue, kUnlikely);
+
+    irb_.SetInsertPoint(block_exception);
+
+    EmitUpdateDexPC(dex_pc);
+    irb_.CreateCall2(irb_.GetRuntime(runtime_support::ThrowIndexOutOfBounds), index, array_len);
+    EmitBranchExceptionLandingPad(dex_pc);
+
+    irb_.SetInsertPoint(block_continue);
+  }
+}
+
+llvm::FunctionType* GBCExpanderPass::GetFunctionType(llvm::Type* ret_type, uint32_t method_idx,
+                                                     bool is_static) {
+  // Get method signature
+  art::DexFile::MethodId const& method_id =
+      dex_compilation_unit_->GetDexFile()->GetMethodId(method_idx);
+
+  uint32_t shorty_size;
+  const char* shorty = dex_compilation_unit_->GetDexFile()->GetMethodShorty(method_id, &shorty_size);
+  CHECK_GE(shorty_size, 1u);
+
+  // Get argument types
+  std::vector<llvm::Type*> args_type;
+
+  args_type.push_back(irb_.getJObjectTy()); // method object pointer
+
+  if (!is_static) {
+    args_type.push_back(irb_.getJType('L')); // "this" object pointer
+  }
+
+  for (uint32_t i = 1; i < shorty_size; ++i) {
+    char shorty_type = art::RemapShorty(shorty[i]);
+    args_type.push_back(irb_.getJType(shorty_type));
+  }
+
+  return llvm::FunctionType::get(ret_type, args_type, false);
+}
+
+
+llvm::BasicBlock* GBCExpanderPass::
+CreateBasicBlockWithDexPC(uint32_t dex_pc, const char* postfix) {
+  std::string name;
+
+#if !defined(NDEBUG)
+  art::StringAppendF(&name, "B%04x.%s", dex_pc, postfix);
+#endif
+
+  return llvm::BasicBlock::Create(context_, name, func_);
+}
+
+llvm::BasicBlock* GBCExpanderPass::GetBasicBlock(uint32_t dex_pc) {
+  DCHECK(dex_pc < dex_compilation_unit_->GetCodeItem()->insns_size_in_code_units_);
+  CHECK(basic_blocks_[dex_pc] != NULL);
+  return basic_blocks_[dex_pc];
+}
+
+int32_t GBCExpanderPass::GetTryItemOffset(uint32_t dex_pc) {
+  int32_t min = 0;
+  int32_t max = dex_compilation_unit_->GetCodeItem()->tries_size_ - 1;
+
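+  // Binary search for the try item that covers dex_pc.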
+  while (min <= max) {
+    int32_t mid = min + (max - min) / 2;
+
+    const art::DexFile::TryItem* ti =
+        art::DexFile::GetTryItems(*dex_compilation_unit_->GetCodeItem(), mid);
+    uint32_t start = ti->start_addr_;
+    uint32_t end = start + ti->insn_count_;
+
+    if (dex_pc < start) {
+      max = mid - 1;
+    } else if (dex_pc >= end) {
+      min = mid + 1;
+    } else {
+      return mid; // found
+    }
+  }
+
+  return -1; // not found
+}
+
+llvm::BasicBlock* GBCExpanderPass::GetLandingPadBasicBlock(uint32_t dex_pc) {
+  // Find the try item for this address in this method
+  int32_t ti_offset = GetTryItemOffset(dex_pc);
+
+  if (ti_offset == -1) {
+    return NULL; // No landing pad is available for this address.
+  }
+
+  // Check for the existing landing pad basic block
+  DCHECK_GT(basic_block_landing_pads_.size(), static_cast<size_t>(ti_offset));
+  llvm::BasicBlock* block_lpad = basic_block_landing_pads_[ti_offset];
+
+  if (block_lpad) {
+    // We have already generated the landing pad for this try item.  Return
+    // the same basic block.
+    return block_lpad;
+  }
+
+  // Get try item from code item
+  const art::DexFile::TryItem* ti = art::DexFile::GetTryItems(*dex_compilation_unit_->GetCodeItem(),
+                                                              ti_offset);
+
+  std::string lpadname;
+
+#if !defined(NDEBUG)
+  art::StringAppendF(&lpadname, "lpad%d_%04x_to_%04x", ti_offset, ti->start_addr_, ti->handler_off_);
+#endif
+
+  // Create landing pad basic block
+  block_lpad = llvm::BasicBlock::Create(context_, lpadname, func_);
+
+  // Change IRBuilder insert point
+  llvm::IRBuilderBase::InsertPoint irb_ip_original = irb_.saveIP();
+  irb_.SetInsertPoint(block_lpad);
+
+  // Find catch block with matching type
+  llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
+
+  llvm::Value* ti_offset_value = irb_.getInt32(ti_offset);
+
+  llvm::Value* catch_handler_index_value =
+    irb_.CreateCall2(irb_.GetRuntime(runtime_support::FindCatchBlock),
+                     method_object_addr, ti_offset_value);
+
+  // Switch instruction (Go to unwind basic block by default)
+  llvm::SwitchInst* sw =
+    irb_.CreateSwitch(catch_handler_index_value, GetUnwindBasicBlock());
+
+  // Cases with matched catch block
+  art::CatchHandlerIterator iter(*dex_compilation_unit_->GetCodeItem(), ti->start_addr_);
+
+  for (uint32_t c = 0; iter.HasNext(); iter.Next(), ++c) {
+    sw->addCase(irb_.getInt32(c), GetBasicBlock(iter.GetHandlerAddress()));
+  }
+
+  // Restore the original insert point for IRBuilder
+  irb_.restoreIP(irb_ip_original);
+
+  // Cache this landing pad
+  DCHECK_GT(basic_block_landing_pads_.size(), static_cast<size_t>(ti_offset));
+  basic_block_landing_pads_[ti_offset] = block_lpad;
+
+  return block_lpad;
+}
+
+llvm::BasicBlock* GBCExpanderPass::GetUnwindBasicBlock() {
+  // Check for an existing unwinding basic block
+  if (basic_block_unwind_ != NULL) {
+    return basic_block_unwind_;
+  }
+
+  // Create new basic block for unwinding
+  basic_block_unwind_ =
+    llvm::BasicBlock::Create(context_, "exception_unwind", func_);
+
+  // Change IRBuilder insert point
+  llvm::IRBuilderBase::InsertPoint irb_ip_original = irb_.saveIP();
+  irb_.SetInsertPoint(basic_block_unwind_);
+
+  // Pop the shadow frame
+  Expand_PopShadowFrame();
+
+  // Emit the code to return the default value (zero) for the given return type.
+  char ret_shorty = dex_compilation_unit_->GetShorty()[0];
+  ret_shorty = art::RemapShorty(ret_shorty);
+  if (ret_shorty == 'V') {
+    irb_.CreateRetVoid();
+  } else {
+    irb_.CreateRet(irb_.getJZero(ret_shorty));
+  }
+
+  // Restore the original insert point for IRBuilder
+  irb_.restoreIP(irb_ip_original);
+
+  return basic_block_unwind_;
+}
+
+void GBCExpanderPass::EmitBranchExceptionLandingPad(uint32_t dex_pc) {
+  if (llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc)) {
+    landing_pad_phi_mapping_[lpad].push_back(std::make_pair(current_bb_->getUniquePredecessor(),
+                                                            irb_.GetInsertBlock()));
+    irb_.CreateBr(lpad);
+  } else {
+    irb_.CreateBr(GetUnwindBasicBlock());
+  }
+}
+
+void GBCExpanderPass::EmitGuard_ExceptionLandingPad(uint32_t dex_pc) {
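+  // Check whether an exception is pending; if so, branch to the landing pad
+  // for this dex_pc, or to the unwind block when no handler covers it.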
+  llvm::Value* exception_pending = irb_.Runtime().EmitIsExceptionPending();
+
+  llvm::BasicBlock* block_cont = CreateBasicBlockWithDexPC(dex_pc, "cont");
+
+  if (llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc)) {
+    landing_pad_phi_mapping_[lpad].push_back(std::make_pair(current_bb_->getUniquePredecessor(),
+                                                            irb_.GetInsertBlock()));
+    irb_.CreateCondBr(exception_pending, lpad, block_cont, kUnlikely);
+  } else {
+    irb_.CreateCondBr(exception_pending, GetUnwindBasicBlock(), block_cont, kUnlikely);
+  }
+
+  irb_.SetInsertPoint(block_cont);
+}
+
+llvm::Value*
+GBCExpanderPass::ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
+                                 llvm::CallInst& call_inst) {
+  switch (intr_id) {
+    //==- Thread -----------------------------------------------------------==//
+    case IntrinsicHelper::GetCurrentThread: {
+      return irb_.Runtime().EmitGetCurrentThread();
+    }
+    case IntrinsicHelper::CheckSuspend: {
+      Expand_TestSuspend(call_inst);
+      return NULL;
+    }
+    case IntrinsicHelper::TestSuspend: {
+      Expand_TestSuspend(call_inst);
+      return NULL;
+    }
+    case IntrinsicHelper::MarkGCCard: {
+      Expand_MarkGCCard(call_inst);
+      return NULL;
+    }
+
+    //==- Exception --------------------------------------------------------==//
+    case IntrinsicHelper::ThrowException: {
+      return ExpandToRuntime(runtime_support::ThrowException, call_inst);
+    }
+    case IntrinsicHelper::HLThrowException: {
+      uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
+
+      EmitUpdateDexPC(dex_pc);
+
+      irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowException),
+                      call_inst.getArgOperand(0));
+
+      EmitGuard_ExceptionLandingPad(dex_pc);
+      return NULL;
+    }
+    case IntrinsicHelper::GetException: {
+      return irb_.Runtime().EmitGetAndClearException();
+    }
+    case IntrinsicHelper::IsExceptionPending: {
+      return irb_.Runtime().EmitIsExceptionPending();
+    }
+    case IntrinsicHelper::FindCatchBlock: {
+      return ExpandToRuntime(runtime_support::FindCatchBlock, call_inst);
+    }
+    case IntrinsicHelper::ThrowDivZeroException: {
+      return ExpandToRuntime(runtime_support::ThrowDivZeroException, call_inst);
+    }
+    case IntrinsicHelper::ThrowNullPointerException: {
+      return ExpandToRuntime(runtime_support::ThrowNullPointerException, call_inst);
+    }
+    case IntrinsicHelper::ThrowIndexOutOfBounds: {
+      return ExpandToRuntime(runtime_support::ThrowIndexOutOfBounds, call_inst);
+    }
+
+    //==- Const String -----------------------------------------------------==//
+    case IntrinsicHelper::ConstString: {
+      return Expand_ConstString(call_inst);
+    }
+    case IntrinsicHelper::LoadStringFromDexCache: {
+      return Expand_LoadStringFromDexCache(call_inst.getArgOperand(0));
+    }
+    case IntrinsicHelper::ResolveString: {
+      return ExpandToRuntime(runtime_support::ResolveString, call_inst);
+    }
+
+    //==- Const Class ------------------------------------------------------==//
+    case IntrinsicHelper::ConstClass: {
+      return Expand_ConstClass(call_inst);
+    }
+    case IntrinsicHelper::InitializeTypeAndVerifyAccess: {
+      return ExpandToRuntime(runtime_support::InitializeTypeAndVerifyAccess, call_inst);
+    }
+    case IntrinsicHelper::LoadTypeFromDexCache: {
+      return Expand_LoadTypeFromDexCache(call_inst.getArgOperand(0));
+    }
+    case IntrinsicHelper::InitializeType: {
+      return ExpandToRuntime(runtime_support::InitializeType, call_inst);
+    }
+
+    //==- Lock -------------------------------------------------------------==//
+    case IntrinsicHelper::LockObject: {
+      Expand_LockObject(call_inst.getArgOperand(0));
+      return NULL;
+    }
+    case IntrinsicHelper::UnlockObject: {
+      Expand_UnlockObject(call_inst.getArgOperand(0));
+      return NULL;
+    }
+
+    //==- Cast -------------------------------------------------------------==//
+    case IntrinsicHelper::CheckCast: {
+      return ExpandToRuntime(runtime_support::CheckCast, call_inst);
+    }
+    case IntrinsicHelper::HLCheckCast: {
+      Expand_HLCheckCast(call_inst);
+      return NULL;
+    }
+    case IntrinsicHelper::IsAssignable: {
+      return ExpandToRuntime(runtime_support::IsAssignable, call_inst);
+    }
+
+    //==- Alloc ------------------------------------------------------------==//
+    case IntrinsicHelper::AllocObject: {
+      return ExpandToRuntime(runtime_support::AllocObject, call_inst);
+    }
+    case IntrinsicHelper::AllocObjectWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::AllocObjectWithAccessCheck, call_inst);
+    }
+
+    //==- Instance ---------------------------------------------------------==//
+    case IntrinsicHelper::NewInstance: {
+      return Expand_NewInstance(call_inst);
+    }
+    case IntrinsicHelper::InstanceOf: {
+      return Expand_InstanceOf(call_inst);
+    }
+
+    //==- Array ------------------------------------------------------------==//
+    case IntrinsicHelper::NewArray: {
+      return Expand_NewArray(call_inst);
+    }
+    case IntrinsicHelper::OptArrayLength: {
+      return Expand_OptArrayLength(call_inst);
+    }
+    case IntrinsicHelper::ArrayLength: {
+      return EmitLoadArrayLength(call_inst.getArgOperand(0));
+    }
+    case IntrinsicHelper::AllocArray: {
+      return ExpandToRuntime(runtime_support::AllocArray, call_inst);
+    }
+    case IntrinsicHelper::AllocArrayWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::AllocArrayWithAccessCheck,
+                             call_inst);
+    }
+    case IntrinsicHelper::CheckAndAllocArray: {
+      return ExpandToRuntime(runtime_support::CheckAndAllocArray, call_inst);
+    }
+    case IntrinsicHelper::CheckAndAllocArrayWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::CheckAndAllocArrayWithAccessCheck,
+                             call_inst);
+    }
+    case IntrinsicHelper::ArrayGet: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kInt);
+    }
+    case IntrinsicHelper::ArrayGetWide: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kLong);
+    }
+    case IntrinsicHelper::ArrayGetObject: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kObject);
+    }
+    case IntrinsicHelper::ArrayGetBoolean: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kBoolean);
+    }
+    case IntrinsicHelper::ArrayGetByte: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kByte);
+    }
+    case IntrinsicHelper::ArrayGetChar: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kChar);
+    }
+    case IntrinsicHelper::ArrayGetShort: {
+      return Expand_ArrayGet(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             kShort);
+    }
+    case IntrinsicHelper::ArrayPut: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kInt);
+      return NULL;
+    }
+    case IntrinsicHelper::ArrayPutWide: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kLong);
+      return NULL;
+    }
+    case IntrinsicHelper::ArrayPutObject: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kObject);
+      return NULL;
+    }
+    case IntrinsicHelper::ArrayPutBoolean: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kBoolean);
+      return NULL;
+    }
+    case IntrinsicHelper::ArrayPutByte: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kByte);
+      return NULL;
+    }
+    case IntrinsicHelper::ArrayPutChar: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kChar);
+      return NULL;
+    }
+    case IntrinsicHelper::ArrayPutShort: {
+      Expand_ArrayPut(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      kShort);
+      return NULL;
+    }
+    case IntrinsicHelper::CheckPutArrayElement: {
+      return ExpandToRuntime(runtime_support::CheckPutArrayElement, call_inst);
+    }
+    case IntrinsicHelper::FilledNewArray: {
+      Expand_FilledNewArray(call_inst);
+      return NULL;
+    }
+    case IntrinsicHelper::FillArrayData: {
+      return ExpandToRuntime(runtime_support::FillArrayData, call_inst);
+    }
+    case IntrinsicHelper::HLFillArrayData: {
+      Expand_HLFillArrayData(call_inst);
+      return NULL;
+    }
+    case IntrinsicHelper::HLFilledNewArray: {
+      return Expand_HLFilledNewArray(call_inst);
+    }
+
+    //==- Instance Field ---------------------------------------------------==//
+    case IntrinsicHelper::InstanceFieldGet:
+    case IntrinsicHelper::InstanceFieldGetBoolean:
+    case IntrinsicHelper::InstanceFieldGetByte:
+    case IntrinsicHelper::InstanceFieldGetChar:
+    case IntrinsicHelper::InstanceFieldGetShort: {
+      return ExpandToRuntime(runtime_support::Get32Instance, call_inst);
+    }
+    case IntrinsicHelper::InstanceFieldGetWide: {
+      return ExpandToRuntime(runtime_support::Get64Instance, call_inst);
+    }
+    case IntrinsicHelper::InstanceFieldGetObject: {
+      return ExpandToRuntime(runtime_support::GetObjectInstance, call_inst);
+    }
+    case IntrinsicHelper::InstanceFieldGetFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kInt);
+    }
+    case IntrinsicHelper::InstanceFieldGetWideFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kLong);
+    }
+    case IntrinsicHelper::InstanceFieldGetObjectFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kObject);
+    }
+    case IntrinsicHelper::InstanceFieldGetBooleanFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kBoolean);
+    }
+    case IntrinsicHelper::InstanceFieldGetByteFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kByte);
+    }
+    case IntrinsicHelper::InstanceFieldGetCharFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kChar);
+    }
+    case IntrinsicHelper::InstanceFieldGetShortFast: {
+      return Expand_IGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kShort);
+    }
+    case IntrinsicHelper::InstanceFieldPut:
+    case IntrinsicHelper::InstanceFieldPutBoolean:
+    case IntrinsicHelper::InstanceFieldPutByte:
+    case IntrinsicHelper::InstanceFieldPutChar:
+    case IntrinsicHelper::InstanceFieldPutShort: {
+      return ExpandToRuntime(runtime_support::Set32Instance, call_inst);
+    }
+    case IntrinsicHelper::InstanceFieldPutWide: {
+      return ExpandToRuntime(runtime_support::Set64Instance, call_inst);
+    }
+    case IntrinsicHelper::InstanceFieldPutObject: {
+      return ExpandToRuntime(runtime_support::SetObjectInstance, call_inst);
+    }
+    case IntrinsicHelper::InstanceFieldPutFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kInt);
+      return NULL;
+    }
+    case IntrinsicHelper::InstanceFieldPutWideFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kLong);
+      return NULL;
+    }
+    case IntrinsicHelper::InstanceFieldPutObjectFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kObject);
+      return NULL;
+    }
+    case IntrinsicHelper::InstanceFieldPutBooleanFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kBoolean);
+      return NULL;
+    }
+    case IntrinsicHelper::InstanceFieldPutByteFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kByte);
+      return NULL;
+    }
+    case IntrinsicHelper::InstanceFieldPutCharFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kChar);
+      return NULL;
+    }
+    case IntrinsicHelper::InstanceFieldPutShortFast: {
+      Expand_IPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kShort);
+      return NULL;
+    }
+
+    //==- Static Field -----------------------------------------------------==//
+    case IntrinsicHelper::StaticFieldGet:
+    case IntrinsicHelper::StaticFieldGetBoolean:
+    case IntrinsicHelper::StaticFieldGetByte:
+    case IntrinsicHelper::StaticFieldGetChar:
+    case IntrinsicHelper::StaticFieldGetShort: {
+      return ExpandToRuntime(runtime_support::Get32Static, call_inst);
+    }
+    case IntrinsicHelper::StaticFieldGetWide: {
+      return ExpandToRuntime(runtime_support::Get64Static, call_inst);
+    }
+    case IntrinsicHelper::StaticFieldGetObject: {
+      return ExpandToRuntime(runtime_support::GetObjectStatic, call_inst);
+    }
+    case IntrinsicHelper::StaticFieldGetFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kInt);
+    }
+    case IntrinsicHelper::StaticFieldGetWideFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kLong);
+    }
+    case IntrinsicHelper::StaticFieldGetObjectFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kObject);
+    }
+    case IntrinsicHelper::StaticFieldGetBooleanFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kBoolean);
+    }
+    case IntrinsicHelper::StaticFieldGetByteFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kByte);
+    }
+    case IntrinsicHelper::StaticFieldGetCharFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kChar);
+    }
+    case IntrinsicHelper::StaticFieldGetShortFast: {
+      return Expand_SGetFast(call_inst.getArgOperand(0),
+                             call_inst.getArgOperand(1),
+                             call_inst.getArgOperand(2),
+                             kShort);
+    }
+    case IntrinsicHelper::StaticFieldPut:
+    case IntrinsicHelper::StaticFieldPutBoolean:
+    case IntrinsicHelper::StaticFieldPutByte:
+    case IntrinsicHelper::StaticFieldPutChar:
+    case IntrinsicHelper::StaticFieldPutShort: {
+      return ExpandToRuntime(runtime_support::Set32Static, call_inst);
+    }
+    case IntrinsicHelper::StaticFieldPutWide: {
+      return ExpandToRuntime(runtime_support::Set64Static, call_inst);
+    }
+    case IntrinsicHelper::StaticFieldPutObject: {
+      return ExpandToRuntime(runtime_support::SetObjectStatic, call_inst);
+    }
+    case IntrinsicHelper::StaticFieldPutFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kInt);
+      return NULL;
+    }
+    case IntrinsicHelper::StaticFieldPutWideFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kLong);
+      return NULL;
+    }
+    case IntrinsicHelper::StaticFieldPutObjectFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kObject);
+      return NULL;
+    }
+    case IntrinsicHelper::StaticFieldPutBooleanFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kBoolean);
+      return NULL;
+    }
+    case IntrinsicHelper::StaticFieldPutByteFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kByte);
+      return NULL;
+    }
+    case IntrinsicHelper::StaticFieldPutCharFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kChar);
+      return NULL;
+    }
+    case IntrinsicHelper::StaticFieldPutShortFast: {
+      Expand_SPutFast(call_inst.getArgOperand(0),
+                      call_inst.getArgOperand(1),
+                      call_inst.getArgOperand(2),
+                      call_inst.getArgOperand(3),
+                      kShort);
+      return NULL;
+    }
+    case IntrinsicHelper::LoadDeclaringClassSSB: {
+      return Expand_LoadDeclaringClassSSB(call_inst.getArgOperand(0));
+    }
+    case IntrinsicHelper::LoadClassSSBFromDexCache: {
+      return Expand_LoadClassSSBFromDexCache(call_inst.getArgOperand(0));
+    }
+    case IntrinsicHelper::InitializeAndLoadClassSSB: {
+      return ExpandToRuntime(runtime_support::InitializeStaticStorage, call_inst);
+    }
+
+    //==- High-level Array -------------------------------------------------==//
+    case IntrinsicHelper::HLArrayGet: {
+      return Expand_HLArrayGet(call_inst, kInt);
+    }
+    case IntrinsicHelper::HLArrayGetBoolean: {
+      return Expand_HLArrayGet(call_inst, kBoolean);
+    }
+    case IntrinsicHelper::HLArrayGetByte: {
+      return Expand_HLArrayGet(call_inst, kByte);
+    }
+    case IntrinsicHelper::HLArrayGetChar: {
+      return Expand_HLArrayGet(call_inst, kChar);
+    }
+    case IntrinsicHelper::HLArrayGetShort: {
+      return Expand_HLArrayGet(call_inst, kShort);
+    }
+    case IntrinsicHelper::HLArrayGetFloat: {
+      return Expand_HLArrayGet(call_inst, kFloat);
+    }
+    case IntrinsicHelper::HLArrayGetWide: {
+      return Expand_HLArrayGet(call_inst, kLong);
+    }
+    case IntrinsicHelper::HLArrayGetDouble: {
+      return Expand_HLArrayGet(call_inst, kDouble);
+    }
+    case IntrinsicHelper::HLArrayGetObject: {
+      return Expand_HLArrayGet(call_inst, kObject);
+    }
+    case IntrinsicHelper::HLArrayPut: {
+      Expand_HLArrayPut(call_inst, kInt);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutBoolean: {
+      Expand_HLArrayPut(call_inst, kBoolean);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutByte: {
+      Expand_HLArrayPut(call_inst, kByte);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutChar: {
+      Expand_HLArrayPut(call_inst, kChar);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutShort: {
+      Expand_HLArrayPut(call_inst, kShort);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutFloat: {
+      Expand_HLArrayPut(call_inst, kFloat);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutWide: {
+      Expand_HLArrayPut(call_inst, kLong);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutDouble: {
+      Expand_HLArrayPut(call_inst, kDouble);
+      return NULL;
+    }
+    case IntrinsicHelper::HLArrayPutObject: {
+      Expand_HLArrayPut(call_inst, kObject);
+      return NULL;
+    }
+
+    //==- High-level Instance ----------------------------------------------==//
+    case IntrinsicHelper::HLIGet: {
+      return Expand_HLIGet(call_inst, kInt);
+    }
+    case IntrinsicHelper::HLIGetBoolean: {
+      return Expand_HLIGet(call_inst, kBoolean);
+    }
+    case IntrinsicHelper::HLIGetByte: {
+      return Expand_HLIGet(call_inst, kByte);
+    }
+    case IntrinsicHelper::HLIGetChar: {
+      return Expand_HLIGet(call_inst, kChar);
+    }
+    case IntrinsicHelper::HLIGetShort: {
+      return Expand_HLIGet(call_inst, kShort);
+    }
+    case IntrinsicHelper::HLIGetFloat: {
+      return Expand_HLIGet(call_inst, kFloat);
+    }
+    case IntrinsicHelper::HLIGetWide: {
+      return Expand_HLIGet(call_inst, kLong);
+    }
+    case IntrinsicHelper::HLIGetDouble: {
+      return Expand_HLIGet(call_inst, kDouble);
+    }
+    case IntrinsicHelper::HLIGetObject: {
+      return Expand_HLIGet(call_inst, kObject);
+    }
+    case IntrinsicHelper::HLIPut: {
+      Expand_HLIPut(call_inst, kInt);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutBoolean: {
+      Expand_HLIPut(call_inst, kBoolean);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutByte: {
+      Expand_HLIPut(call_inst, kByte);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutChar: {
+      Expand_HLIPut(call_inst, kChar);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutShort: {
+      Expand_HLIPut(call_inst, kShort);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutFloat: {
+      Expand_HLIPut(call_inst, kFloat);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutWide: {
+      Expand_HLIPut(call_inst, kLong);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutDouble: {
+      Expand_HLIPut(call_inst, kDouble);
+      return NULL;
+    }
+    case IntrinsicHelper::HLIPutObject: {
+      Expand_HLIPut(call_inst, kObject);
+      return NULL;
+    }
+
+    //==- High-level Invoke ------------------------------------------------==//
+    case IntrinsicHelper::HLInvokeVoid:
+    case IntrinsicHelper::HLInvokeObj:
+    case IntrinsicHelper::HLInvokeInt:
+    case IntrinsicHelper::HLInvokeFloat:
+    case IntrinsicHelper::HLInvokeLong:
+    case IntrinsicHelper::HLInvokeDouble: {
+      return Expand_HLInvoke(call_inst);
+    }
+
+    //==- Invoke -----------------------------------------------------------==//
+    case IntrinsicHelper::FindStaticMethodWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::FindStaticMethodWithAccessCheck, call_inst);
+    }
+    case IntrinsicHelper::FindDirectMethodWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::FindDirectMethodWithAccessCheck, call_inst);
+    }
+    case IntrinsicHelper::FindVirtualMethodWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::FindVirtualMethodWithAccessCheck, call_inst);
+    }
+    case IntrinsicHelper::FindSuperMethodWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::FindSuperMethodWithAccessCheck, call_inst);
+    }
+    case IntrinsicHelper::FindInterfaceMethodWithAccessCheck: {
+      return ExpandToRuntime(runtime_support::FindInterfaceMethodWithAccessCheck, call_inst);
+    }
+    case IntrinsicHelper::GetSDCalleeMethodObjAddrFast: {
+      return Expand_GetSDCalleeMethodObjAddrFast(call_inst.getArgOperand(0));
+    }
+    case IntrinsicHelper::GetVirtualCalleeMethodObjAddrFast: {
+      return Expand_GetVirtualCalleeMethodObjAddrFast(
+                call_inst.getArgOperand(0), call_inst.getArgOperand(1));
+    }
+    case IntrinsicHelper::GetInterfaceCalleeMethodObjAddrFast: {
+      return ExpandToRuntime(runtime_support::FindInterfaceMethod, call_inst);
+    }
+    case IntrinsicHelper::InvokeRetVoid:
+    case IntrinsicHelper::InvokeRetBoolean:
+    case IntrinsicHelper::InvokeRetByte:
+    case IntrinsicHelper::InvokeRetChar:
+    case IntrinsicHelper::InvokeRetShort:
+    case IntrinsicHelper::InvokeRetInt:
+    case IntrinsicHelper::InvokeRetLong:
+    case IntrinsicHelper::InvokeRetFloat:
+    case IntrinsicHelper::InvokeRetDouble:
+    case IntrinsicHelper::InvokeRetObject: {
+      return Expand_Invoke(call_inst);
+    }
+
+    //==- Math -------------------------------------------------------------==//
+    case IntrinsicHelper::DivInt: {
+      return Expand_DivRem(call_inst, /* is_div */true, kInt);
+    }
+    case IntrinsicHelper::RemInt: {
+      return Expand_DivRem(call_inst, /* is_div */false, kInt);
+    }
+    case IntrinsicHelper::DivLong: {
+      return Expand_DivRem(call_inst, /* is_div */true, kLong);
+    }
+    case IntrinsicHelper::RemLong: {
+      return Expand_DivRem(call_inst, /* is_div */false, kLong);
+    }
+    case IntrinsicHelper::D2L: {
+      return ExpandToRuntime(runtime_support::art_d2l, call_inst);
+    }
+    case IntrinsicHelper::D2I: {
+      return ExpandToRuntime(runtime_support::art_d2i, call_inst);
+    }
+    case IntrinsicHelper::F2L: {
+      return ExpandToRuntime(runtime_support::art_f2l, call_inst);
+    }
+    case IntrinsicHelper::F2I: {
+      return ExpandToRuntime(runtime_support::art_f2i, call_inst);
+    }
+
+    //==- High-level Static ------------------------------------------------==//
+    case IntrinsicHelper::HLSget: {
+      return Expand_HLSget(call_inst, kInt);
+    }
+    case IntrinsicHelper::HLSgetBoolean: {
+      return Expand_HLSget(call_inst, kBoolean);
+    }
+    case IntrinsicHelper::HLSgetByte: {
+      return Expand_HLSget(call_inst, kByte);
+    }
+    case IntrinsicHelper::HLSgetChar: {
+      return Expand_HLSget(call_inst, kChar);
+    }
+    case IntrinsicHelper::HLSgetShort: {
+      return Expand_HLSget(call_inst, kShort);
+    }
+    case IntrinsicHelper::HLSgetFloat: {
+      return Expand_HLSget(call_inst, kFloat);
+    }
+    case IntrinsicHelper::HLSgetWide: {
+      return Expand_HLSget(call_inst, kLong);
+    }
+    case IntrinsicHelper::HLSgetDouble: {
+      return Expand_HLSget(call_inst, kDouble);
+    }
+    case IntrinsicHelper::HLSgetObject: {
+      return Expand_HLSget(call_inst, kObject);
+    }
+    case IntrinsicHelper::HLSput: {
+      Expand_HLSput(call_inst, kInt);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputBoolean: {
+      Expand_HLSput(call_inst, kBoolean);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputByte: {
+      Expand_HLSput(call_inst, kByte);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputChar: {
+      Expand_HLSput(call_inst, kChar);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputShort: {
+      Expand_HLSput(call_inst, kShort);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputFloat: {
+      Expand_HLSput(call_inst, kFloat);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputWide: {
+      Expand_HLSput(call_inst, kLong);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputDouble: {
+      Expand_HLSput(call_inst, kDouble);
+      return NULL;
+    }
+    case IntrinsicHelper::HLSputObject: {
+      Expand_HLSput(call_inst, kObject);
+      return NULL;
+    }
+
+    //==- High-level Monitor -----------------------------------------------==//
+    case IntrinsicHelper::MonitorEnter: {
+      Expand_MonitorEnter(call_inst);
+      return NULL;
+    }
+    case IntrinsicHelper::MonitorExit: {
+      Expand_MonitorExit(call_inst);
+      return NULL;
+    }
+
+    //==- Shadow Frame -----------------------------------------------------==//
+    case IntrinsicHelper::AllocaShadowFrame: {
+      Expand_AllocaShadowFrame(call_inst.getArgOperand(0));
+      return NULL;
+    }
+    case IntrinsicHelper::SetVReg: {
+      Expand_SetVReg(call_inst.getArgOperand(0),
+                     call_inst.getArgOperand(1));
+      return NULL;
+    }
+    case IntrinsicHelper::PopShadowFrame: {
+      Expand_PopShadowFrame();
+      return NULL;
+    }
+    case IntrinsicHelper::UpdateDexPC: {
+      Expand_UpdateDexPC(call_inst.getArgOperand(0));
+      return NULL;
+    }
+
+    //==- Comparison -------------------------------------------------------==//
+    case IntrinsicHelper::CmplFloat:
+    case IntrinsicHelper::CmplDouble: {
+      return Expand_FPCompare(call_inst.getArgOperand(0),
+                              call_inst.getArgOperand(1),
+                              false);
+    }
+    case IntrinsicHelper::CmpgFloat:
+    case IntrinsicHelper::CmpgDouble: {
+      return Expand_FPCompare(call_inst.getArgOperand(0),
+                              call_inst.getArgOperand(1),
+                              true);
+    }
+    case IntrinsicHelper::CmpLong: {
+      return Expand_LongCompare(call_inst.getArgOperand(0),
+                                call_inst.getArgOperand(1));
+    }
+
+    //==- Const ------------------------------------------------------------==//
+    case IntrinsicHelper::ConstInt:
+    case IntrinsicHelper::ConstLong: {
+      return call_inst.getArgOperand(0);
+    }
+    case IntrinsicHelper::ConstFloat: {
+      return irb_.CreateBitCast(call_inst.getArgOperand(0),
+                                irb_.getJFloatTy());
+    }
+    case IntrinsicHelper::ConstDouble: {
+      return irb_.CreateBitCast(call_inst.getArgOperand(0),
+                                irb_.getJDoubleTy());
+    }
+    case IntrinsicHelper::ConstObj: {
+      CHECK(LV2UInt(call_inst.getArgOperand(0)) == 0);
+      return irb_.getJNull();
+    }
+
+    //==- Method Info ------------------------------------------------------==//
+    case IntrinsicHelper::MethodInfo: {
+      // Nothing to be done, because MethodInfo carries optional hints that are
+      // not needed by the portable path.
+      return NULL;
+    }
+
+    //==- Copy -------------------------------------------------------------==//
+    case IntrinsicHelper::CopyInt:
+    case IntrinsicHelper::CopyFloat:
+    case IntrinsicHelper::CopyLong:
+    case IntrinsicHelper::CopyDouble:
+    case IntrinsicHelper::CopyObj: {
+      return call_inst.getArgOperand(0);
+    }
+
+    //==- Shift ------------------------------------------------------------==//
+    case IntrinsicHelper::SHLLong: {
+      return Expand_IntegerShift(call_inst.getArgOperand(0),
+                                 call_inst.getArgOperand(1),
+                                 kIntegerSHL, kLong);
+    }
+    case IntrinsicHelper::SHRLong: {
+      return Expand_IntegerShift(call_inst.getArgOperand(0),
+                                 call_inst.getArgOperand(1),
+                                 kIntegerSHR, kLong);
+    }
+    case IntrinsicHelper::USHRLong: {
+      return Expand_IntegerShift(call_inst.getArgOperand(0),
+                                 call_inst.getArgOperand(1),
+                                 kIntegerUSHR, kLong);
+    }
+    case IntrinsicHelper::SHLInt: {
+      return Expand_IntegerShift(call_inst.getArgOperand(0),
+                                 call_inst.getArgOperand(1),
+                                 kIntegerSHL, kInt);
+    }
+    case IntrinsicHelper::SHRInt: {
+      return Expand_IntegerShift(call_inst.getArgOperand(0),
+                                 call_inst.getArgOperand(1),
+                                 kIntegerSHR, kInt);
+    }
+    case IntrinsicHelper::USHRInt: {
+      return Expand_IntegerShift(call_inst.getArgOperand(0),
+                                 call_inst.getArgOperand(1),
+                                 kIntegerUSHR, kInt);
+    }
+
+    //==- Conversion -------------------------------------------------------==//
+    case IntrinsicHelper::IntToChar: {
+      return irb_.CreateZExt(irb_.CreateTrunc(call_inst.getArgOperand(0), irb_.getJCharTy()),
+                             irb_.getJIntTy());
+    }
+    case IntrinsicHelper::IntToShort: {
+      return irb_.CreateSExt(irb_.CreateTrunc(call_inst.getArgOperand(0), irb_.getJShortTy()),
+                             irb_.getJIntTy());
+    }
+    case IntrinsicHelper::IntToByte: {
+      return irb_.CreateSExt(irb_.CreateTrunc(call_inst.getArgOperand(0), irb_.getJByteTy()),
+                             irb_.getJIntTy());
+    }
+
+    //==- Exception --------------------------------------------------------==//
+    case IntrinsicHelper::CatchTargets: {
+      UpdatePhiInstruction(current_bb_, irb_.GetInsertBlock());
+      llvm::SwitchInst* si = llvm::dyn_cast<llvm::SwitchInst>(call_inst.getNextNode());
+      CHECK(si != NULL);
+      irb_.CreateBr(si->getDefaultDest());
+      si->eraseFromParent();
+      return call_inst.getArgOperand(0);
+    }
+
+    //==- Constructor Barrier ----------------------------------------------==//
+    case IntrinsicHelper::ConstructorBarrier: {
+      irb_.CreateMemoryBarrier(art::kStoreStore);
+      return NULL;
+    }
+
+    //==- Unknown Cases ----------------------------------------------------==//
+    case IntrinsicHelper::MaxIntrinsicId:
+    case IntrinsicHelper::UnknownId:
+    //default:
+      // NOTE: "default" is intentionally commented so that C/C++ compiler will
+      // give some warning on unmatched cases.
+      // NOTE: We should not implement these cases.
+      break;
+  }
+  UNIMPLEMENTED(FATAL) << "Unexpected GBC intrinsic: " << static_cast<int>(intr_id);
+  return NULL;
+}
+
+} // anonymous namespace
+
+namespace art {
+namespace llvm {
+
+::llvm::FunctionPass*
+CreateGBCExpanderPass(const IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
+                      CompilerDriver* driver, const DexCompilationUnit* dex_compilation_unit) {
+  return new GBCExpanderPass(intrinsic_helper, irb, driver, dex_compilation_unit);
+}
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/generated/art_module.cc b/compiler/llvm/generated/art_module.cc
new file mode 100644
index 0000000..bcd90b9
--- /dev/null
+++ b/compiler/llvm/generated/art_module.cc
@@ -0,0 +1,1096 @@
+// Generated with ./gen_art_module_cc.sh
+
+
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+// TODO: Remove this pragma after llc can generate makeLLVMModuleContents()
+// with smaller frame size.
+
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Type.h>
+
+#include <vector>
+
+using namespace llvm;
+
+namespace art {
+namespace llvm {
+
+
+// Generated by llvm2cpp - DO NOT MODIFY!
+
+
+Module* makeLLVMModuleContents(Module *mod) {
+
+mod->setModuleIdentifier("art_module.ll");
+
+// Type Definitions
+std::vector<Type*>FuncTy_0_args;
+StructType *StructTy_JavaObject = mod->getTypeByName("JavaObject");
+if (!StructTy_JavaObject) {
+StructTy_JavaObject = StructType::create(mod->getContext(), "JavaObject");
+}
+std::vector<Type*>StructTy_JavaObject_fields;
+if (StructTy_JavaObject->isOpaque()) {
+StructTy_JavaObject->setBody(StructTy_JavaObject_fields, /*isPacked=*/false);
+}
+
+PointerType* PointerTy_1 = PointerType::get(StructTy_JavaObject, 0);
+
+FuncTy_0_args.push_back(PointerTy_1);
+StructType *StructTy_ShadowFrame = mod->getTypeByName("ShadowFrame");
+if (!StructTy_ShadowFrame) {
+StructTy_ShadowFrame = StructType::create(mod->getContext(), "ShadowFrame");
+}
+std::vector<Type*>StructTy_ShadowFrame_fields;
+StructTy_ShadowFrame_fields.push_back(IntegerType::get(mod->getContext(), 32));
+PointerType* PointerTy_2 = PointerType::get(StructTy_ShadowFrame, 0);
+
+StructTy_ShadowFrame_fields.push_back(PointerTy_2);
+StructTy_ShadowFrame_fields.push_back(PointerTy_1);
+StructTy_ShadowFrame_fields.push_back(IntegerType::get(mod->getContext(), 32));
+if (StructTy_ShadowFrame->isOpaque()) {
+StructTy_ShadowFrame->setBody(StructTy_ShadowFrame_fields, /*isPacked=*/false);
+}
+
+
+FuncTy_0_args.push_back(PointerTy_2);
+FunctionType* FuncTy_0 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_0_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_3_args;
+FunctionType* FuncTy_3 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_3_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_4_args;
+FuncTy_4_args.push_back(PointerTy_1);
+FunctionType* FuncTy_4 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_4_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_5_args;
+FuncTy_5_args.push_back(PointerTy_1);
+FuncTy_5_args.push_back(PointerTy_1);
+FunctionType* FuncTy_5 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_5_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_6_args;
+FuncTy_6_args.push_back(PointerTy_1);
+FunctionType* FuncTy_6 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_6_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_7_args;
+FuncTy_7_args.push_back(PointerTy_1);
+FuncTy_7_args.push_back(PointerTy_2);
+FuncTy_7_args.push_back(PointerTy_1);
+FuncTy_7_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_7 = FunctionType::get(
+ /*Result=*/PointerTy_2,
+ /*Params=*/FuncTy_7_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_8_args;
+FuncTy_8_args.push_back(PointerTy_2);
+FunctionType* FuncTy_8 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_8_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_9_args;
+FunctionType* FuncTy_9 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_9_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_10_args;
+FuncTy_10_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_10_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_10 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_10_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_11_args;
+FuncTy_11_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_11 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_11_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_12_args;
+FuncTy_12_args.push_back(PointerTy_1);
+FuncTy_12_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_12 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_12_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_13_args;
+FuncTy_13_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_13_args.push_back(PointerTy_1);
+FuncTy_13_args.push_back(PointerTy_1);
+FunctionType* FuncTy_13 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_13_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_14_args;
+FuncTy_14_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_14_args.push_back(PointerTy_1);
+FuncTy_14_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_14_args.push_back(PointerTy_1);
+FunctionType* FuncTy_14 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_14_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_15_args;
+FuncTy_15_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_15_args.push_back(PointerTy_1);
+FunctionType* FuncTy_15 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_15_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_16_args;
+FuncTy_16_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_16_args.push_back(PointerTy_1);
+FuncTy_16_args.push_back(PointerTy_1);
+FuncTy_16_args.push_back(PointerTy_1);
+FunctionType* FuncTy_16 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_16_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_17_args;
+FuncTy_17_args.push_back(PointerTy_1);
+FuncTy_17_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_17 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_17_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_18_args;
+FuncTy_18_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_18_args.push_back(PointerTy_1);
+FuncTy_18_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_18 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_18_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_19_args;
+FuncTy_19_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_19_args.push_back(PointerTy_1);
+FuncTy_19_args.push_back(IntegerType::get(mod->getContext(), 64));
+FunctionType* FuncTy_19 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_19_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_20_args;
+FuncTy_20_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_20_args.push_back(PointerTy_1);
+FuncTy_20_args.push_back(PointerTy_1);
+FunctionType* FuncTy_20 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_20_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_21_args;
+FuncTy_21_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_21_args.push_back(PointerTy_1);
+FunctionType* FuncTy_21 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_21_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_22_args;
+FuncTy_22_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_22_args.push_back(PointerTy_1);
+FunctionType* FuncTy_22 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 64),
+ /*Params=*/FuncTy_22_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_23_args;
+FuncTy_23_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_23_args.push_back(PointerTy_1);
+FunctionType* FuncTy_23 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_23_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_24_args;
+FuncTy_24_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_24_args.push_back(PointerTy_1);
+FuncTy_24_args.push_back(PointerTy_1);
+FuncTy_24_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_24 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_24_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_25_args;
+FuncTy_25_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_25_args.push_back(PointerTy_1);
+FuncTy_25_args.push_back(PointerTy_1);
+FuncTy_25_args.push_back(IntegerType::get(mod->getContext(), 64));
+FunctionType* FuncTy_25 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_25_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_26_args;
+FuncTy_26_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_26_args.push_back(PointerTy_1);
+FuncTy_26_args.push_back(PointerTy_1);
+FuncTy_26_args.push_back(PointerTy_1);
+FunctionType* FuncTy_26 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_26_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_27_args;
+FuncTy_27_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_27_args.push_back(PointerTy_1);
+FuncTy_27_args.push_back(PointerTy_1);
+FunctionType* FuncTy_27 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 64),
+ /*Params=*/FuncTy_27_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_28_args;
+FuncTy_28_args.push_back(PointerTy_1);
+FuncTy_28_args.push_back(PointerTy_1);
+FunctionType* FuncTy_28 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_28_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_29_args;
+FuncTy_29_args.push_back(PointerTy_1);
+FuncTy_29_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_29_args.push_back(PointerTy_1);
+FuncTy_29_args.push_back(IntegerType::get(mod->getContext(), 32));
+FunctionType* FuncTy_29 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_29_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_30_args;
+FuncTy_30_args.push_back(PointerTy_1);
+FuncTy_30_args.push_back(PointerTy_1);
+FunctionType* FuncTy_30 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_30_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_31_args;
+FuncTy_31_args.push_back(Type::getDoubleTy(mod->getContext()));
+FunctionType* FuncTy_31 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 64),
+ /*Params=*/FuncTy_31_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_32_args;
+FuncTy_32_args.push_back(Type::getDoubleTy(mod->getContext()));
+FunctionType* FuncTy_32 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_32_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_33_args;
+FuncTy_33_args.push_back(Type::getFloatTy(mod->getContext()));
+FunctionType* FuncTy_33 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 64),
+ /*Params=*/FuncTy_33_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_34_args;
+FuncTy_34_args.push_back(Type::getFloatTy(mod->getContext()));
+FunctionType* FuncTy_34 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_34_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_35_args;
+FuncTy_35_args.push_back(PointerTy_1);
+FunctionType* FuncTy_35 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 32),
+ /*Params=*/FuncTy_35_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_36_args;
+FuncTy_36_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_36_args.push_back(PointerTy_1);
+FuncTy_36_args.push_back(PointerTy_1);
+FunctionType* FuncTy_36 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_36_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_37_args;
+FuncTy_37_args.push_back(PointerTy_1);
+FuncTy_37_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_37_args.push_back(PointerTy_1);
+FunctionType* FuncTy_37 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_37_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_38_args;
+FuncTy_38_args.push_back(PointerTy_1);
+FuncTy_38_args.push_back(IntegerType::get(mod->getContext(), 32));
+FuncTy_38_args.push_back(PointerTy_1);
+FuncTy_38_args.push_back(PointerTy_1);
+FunctionType* FuncTy_38 = FunctionType::get(
+ /*Result=*/PointerTy_1,
+ /*Params=*/FuncTy_38_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_39_args;
+FunctionType* FuncTy_39 = FunctionType::get(
+ /*Result=*/IntegerType::get(mod->getContext(), 1),
+ /*Params=*/FuncTy_39_args,
+ /*isVarArg=*/false);
+
+std::vector<Type*>FuncTy_40_args;
+FuncTy_40_args.push_back(PointerTy_1);
+FunctionType* FuncTy_40 = FunctionType::get(
+ /*Result=*/Type::getVoidTy(mod->getContext()),
+ /*Params=*/FuncTy_40_args,
+ /*isVarArg=*/true);
+
+
+// Function Declarations
+
+Function* func___art_type_list = mod->getFunction("__art_type_list");
+if (!func___art_type_list) {
+func___art_type_list = Function::Create(
+ /*Type=*/FuncTy_0,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"__art_type_list", mod); // (external, no body)
+func___art_type_list->setCallingConv(CallingConv::C);
+}
+AttributeSet func___art_type_list_PAL;
+func___art_type_list->setAttributes(func___art_type_list_PAL);
+
+Function* func_art_portable_get_current_thread_from_code = mod->getFunction("art_portable_get_current_thread_from_code");
+if (!func_art_portable_get_current_thread_from_code) {
+func_art_portable_get_current_thread_from_code = Function::Create(
+ /*Type=*/FuncTy_3,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get_current_thread_from_code", mod); // (external, no body)
+func_art_portable_get_current_thread_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get_current_thread_from_code_PAL;
+func_art_portable_get_current_thread_from_code->setAttributes(func_art_portable_get_current_thread_from_code_PAL);
+
+Function* func_art_portable_set_current_thread_from_code = mod->getFunction("art_portable_set_current_thread_from_code");
+if (!func_art_portable_set_current_thread_from_code) {
+func_art_portable_set_current_thread_from_code = Function::Create(
+ /*Type=*/FuncTy_4,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set_current_thread_from_code", mod); // (external, no body)
+func_art_portable_set_current_thread_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set_current_thread_from_code_PAL;
+func_art_portable_set_current_thread_from_code->setAttributes(func_art_portable_set_current_thread_from_code_PAL);
+
+Function* func_art_portable_lock_object_from_code = mod->getFunction("art_portable_lock_object_from_code");
+if (!func_art_portable_lock_object_from_code) {
+func_art_portable_lock_object_from_code = Function::Create(
+ /*Type=*/FuncTy_5,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_lock_object_from_code", mod); // (external, no body)
+func_art_portable_lock_object_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_lock_object_from_code_PAL;
+func_art_portable_lock_object_from_code->setAttributes(func_art_portable_lock_object_from_code_PAL);
+
+Function* func_art_portable_unlock_object_from_code = mod->getFunction("art_portable_unlock_object_from_code");
+if (!func_art_portable_unlock_object_from_code) {
+func_art_portable_unlock_object_from_code = Function::Create(
+ /*Type=*/FuncTy_5,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_unlock_object_from_code", mod); // (external, no body)
+func_art_portable_unlock_object_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_unlock_object_from_code_PAL;
+func_art_portable_unlock_object_from_code->setAttributes(func_art_portable_unlock_object_from_code_PAL);
+
+Function* func_art_portable_test_suspend_from_code = mod->getFunction("art_portable_test_suspend_from_code");
+if (!func_art_portable_test_suspend_from_code) {
+func_art_portable_test_suspend_from_code = Function::Create(
+ /*Type=*/FuncTy_6,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_test_suspend_from_code", mod); // (external, no body)
+func_art_portable_test_suspend_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_test_suspend_from_code_PAL;
+func_art_portable_test_suspend_from_code->setAttributes(func_art_portable_test_suspend_from_code_PAL);
+
+Function* func_art_portable_push_shadow_frame_from_code = mod->getFunction("art_portable_push_shadow_frame_from_code");
+if (!func_art_portable_push_shadow_frame_from_code) {
+func_art_portable_push_shadow_frame_from_code = Function::Create(
+ /*Type=*/FuncTy_7,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_push_shadow_frame_from_code", mod); // (external, no body)
+func_art_portable_push_shadow_frame_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_push_shadow_frame_from_code_PAL;
+func_art_portable_push_shadow_frame_from_code->setAttributes(func_art_portable_push_shadow_frame_from_code_PAL);
+
+Function* func_art_portable_pop_shadow_frame_from_code = mod->getFunction("art_portable_pop_shadow_frame_from_code");
+if (!func_art_portable_pop_shadow_frame_from_code) {
+func_art_portable_pop_shadow_frame_from_code = Function::Create(
+ /*Type=*/FuncTy_8,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_pop_shadow_frame_from_code", mod); // (external, no body)
+func_art_portable_pop_shadow_frame_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_pop_shadow_frame_from_code_PAL;
+func_art_portable_pop_shadow_frame_from_code->setAttributes(func_art_portable_pop_shadow_frame_from_code_PAL);
+
+Function* func_art_portable_get_and_clear_exception = mod->getFunction("art_portable_get_and_clear_exception");
+if (!func_art_portable_get_and_clear_exception) {
+func_art_portable_get_and_clear_exception = Function::Create(
+ /*Type=*/FuncTy_4,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get_and_clear_exception", mod); // (external, no body)
+func_art_portable_get_and_clear_exception->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get_and_clear_exception_PAL;
+func_art_portable_get_and_clear_exception->setAttributes(func_art_portable_get_and_clear_exception_PAL);
+
+Function* func_art_portable_throw_div_zero_from_code = mod->getFunction("art_portable_throw_div_zero_from_code");
+if (!func_art_portable_throw_div_zero_from_code) {
+func_art_portable_throw_div_zero_from_code = Function::Create(
+ /*Type=*/FuncTy_9,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_throw_div_zero_from_code", mod); // (external, no body)
+func_art_portable_throw_div_zero_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_throw_div_zero_from_code_PAL;
+func_art_portable_throw_div_zero_from_code->setAttributes(func_art_portable_throw_div_zero_from_code_PAL);
+
+Function* func_art_portable_throw_array_bounds_from_code = mod->getFunction("art_portable_throw_array_bounds_from_code");
+if (!func_art_portable_throw_array_bounds_from_code) {
+func_art_portable_throw_array_bounds_from_code = Function::Create(
+ /*Type=*/FuncTy_10,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_throw_array_bounds_from_code", mod); // (external, no body)
+func_art_portable_throw_array_bounds_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_throw_array_bounds_from_code_PAL;
+func_art_portable_throw_array_bounds_from_code->setAttributes(func_art_portable_throw_array_bounds_from_code_PAL);
+
+Function* func_art_portable_throw_no_such_method_from_code = mod->getFunction("art_portable_throw_no_such_method_from_code");
+if (!func_art_portable_throw_no_such_method_from_code) {
+func_art_portable_throw_no_such_method_from_code = Function::Create(
+ /*Type=*/FuncTy_11,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_throw_no_such_method_from_code", mod); // (external, no body)
+func_art_portable_throw_no_such_method_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_throw_no_such_method_from_code_PAL;
+func_art_portable_throw_no_such_method_from_code->setAttributes(func_art_portable_throw_no_such_method_from_code_PAL);
+
+Function* func_art_portable_throw_null_pointer_exception_from_code = mod->getFunction("art_portable_throw_null_pointer_exception_from_code");
+if (!func_art_portable_throw_null_pointer_exception_from_code) {
+func_art_portable_throw_null_pointer_exception_from_code = Function::Create(
+ /*Type=*/FuncTy_11,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_throw_null_pointer_exception_from_code", mod); // (external, no body)
+func_art_portable_throw_null_pointer_exception_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_throw_null_pointer_exception_from_code_PAL;
+func_art_portable_throw_null_pointer_exception_from_code->setAttributes(func_art_portable_throw_null_pointer_exception_from_code_PAL);
+
+Function* func_art_portable_throw_stack_overflow_from_code = mod->getFunction("art_portable_throw_stack_overflow_from_code");
+if (!func_art_portable_throw_stack_overflow_from_code) {
+func_art_portable_throw_stack_overflow_from_code = Function::Create(
+ /*Type=*/FuncTy_9,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_throw_stack_overflow_from_code", mod); // (external, no body)
+func_art_portable_throw_stack_overflow_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_throw_stack_overflow_from_code_PAL;
+func_art_portable_throw_stack_overflow_from_code->setAttributes(func_art_portable_throw_stack_overflow_from_code_PAL);
+
+Function* func_art_portable_throw_exception_from_code = mod->getFunction("art_portable_throw_exception_from_code");
+if (!func_art_portable_throw_exception_from_code) {
+func_art_portable_throw_exception_from_code = Function::Create(
+ /*Type=*/FuncTy_6,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_throw_exception_from_code", mod); // (external, no body)
+func_art_portable_throw_exception_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_throw_exception_from_code_PAL;
+func_art_portable_throw_exception_from_code->setAttributes(func_art_portable_throw_exception_from_code_PAL);
+
+Function* func_art_portable_find_catch_block_from_code = mod->getFunction("art_portable_find_catch_block_from_code");
+if (!func_art_portable_find_catch_block_from_code) {
+func_art_portable_find_catch_block_from_code = Function::Create(
+ /*Type=*/FuncTy_12,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_catch_block_from_code", mod); // (external, no body)
+func_art_portable_find_catch_block_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_catch_block_from_code_PAL;
+func_art_portable_find_catch_block_from_code->setAttributes(func_art_portable_find_catch_block_from_code_PAL);
+
+Function* func_art_portable_alloc_object_from_code = mod->getFunction("art_portable_alloc_object_from_code");
+if (!func_art_portable_alloc_object_from_code) {
+func_art_portable_alloc_object_from_code = Function::Create(
+ /*Type=*/FuncTy_13,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_alloc_object_from_code", mod); // (external, no body)
+func_art_portable_alloc_object_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_alloc_object_from_code_PAL;
+func_art_portable_alloc_object_from_code->setAttributes(func_art_portable_alloc_object_from_code_PAL);
+
+Function* func_art_portable_alloc_object_from_code_with_access_check = mod->getFunction("art_portable_alloc_object_from_code_with_access_check");
+if (!func_art_portable_alloc_object_from_code_with_access_check) {
+func_art_portable_alloc_object_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_13,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_alloc_object_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_alloc_object_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_alloc_object_from_code_with_access_check_PAL;
+func_art_portable_alloc_object_from_code_with_access_check->setAttributes(func_art_portable_alloc_object_from_code_with_access_check_PAL);
+
+Function* func_art_portable_alloc_array_from_code = mod->getFunction("art_portable_alloc_array_from_code");
+if (!func_art_portable_alloc_array_from_code) {
+func_art_portable_alloc_array_from_code = Function::Create(
+ /*Type=*/FuncTy_14,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_alloc_array_from_code", mod); // (external, no body)
+func_art_portable_alloc_array_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_alloc_array_from_code_PAL;
+func_art_portable_alloc_array_from_code->setAttributes(func_art_portable_alloc_array_from_code_PAL);
+
+Function* func_art_portable_alloc_array_from_code_with_access_check = mod->getFunction("art_portable_alloc_array_from_code_with_access_check");
+if (!func_art_portable_alloc_array_from_code_with_access_check) {
+func_art_portable_alloc_array_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_14,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_alloc_array_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_alloc_array_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_alloc_array_from_code_with_access_check_PAL;
+func_art_portable_alloc_array_from_code_with_access_check->setAttributes(func_art_portable_alloc_array_from_code_with_access_check_PAL);
+
+Function* func_art_portable_check_and_alloc_array_from_code = mod->getFunction("art_portable_check_and_alloc_array_from_code");
+if (!func_art_portable_check_and_alloc_array_from_code) {
+func_art_portable_check_and_alloc_array_from_code = Function::Create(
+ /*Type=*/FuncTy_14,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_check_and_alloc_array_from_code", mod); // (external, no body)
+func_art_portable_check_and_alloc_array_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_check_and_alloc_array_from_code_PAL;
+func_art_portable_check_and_alloc_array_from_code->setAttributes(func_art_portable_check_and_alloc_array_from_code_PAL);
+
+Function* func_art_portable_check_and_alloc_array_from_code_with_access_check = mod->getFunction("art_portable_check_and_alloc_array_from_code_with_access_check");
+if (!func_art_portable_check_and_alloc_array_from_code_with_access_check) {
+func_art_portable_check_and_alloc_array_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_14,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_check_and_alloc_array_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_check_and_alloc_array_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_check_and_alloc_array_from_code_with_access_check_PAL;
+func_art_portable_check_and_alloc_array_from_code_with_access_check->setAttributes(func_art_portable_check_and_alloc_array_from_code_with_access_check_PAL);
+
+Function* func_art_portable_find_instance_field_from_code = mod->getFunction("art_portable_find_instance_field_from_code");
+if (!func_art_portable_find_instance_field_from_code) {
+func_art_portable_find_instance_field_from_code = Function::Create(
+ /*Type=*/FuncTy_15,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_instance_field_from_code", mod); // (external, no body)
+func_art_portable_find_instance_field_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_instance_field_from_code_PAL;
+func_art_portable_find_instance_field_from_code->setAttributes(func_art_portable_find_instance_field_from_code_PAL);
+
+Function* func_art_portable_find_static_field_from_code = mod->getFunction("art_portable_find_static_field_from_code");
+if (!func_art_portable_find_static_field_from_code) {
+func_art_portable_find_static_field_from_code = Function::Create(
+ /*Type=*/FuncTy_15,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_static_field_from_code", mod); // (external, no body)
+func_art_portable_find_static_field_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_static_field_from_code_PAL;
+func_art_portable_find_static_field_from_code->setAttributes(func_art_portable_find_static_field_from_code_PAL);
+
+Function* func_art_portable_find_static_method_from_code_with_access_check = mod->getFunction("art_portable_find_static_method_from_code_with_access_check");
+if (!func_art_portable_find_static_method_from_code_with_access_check) {
+func_art_portable_find_static_method_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_16,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_static_method_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_find_static_method_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_static_method_from_code_with_access_check_PAL;
+func_art_portable_find_static_method_from_code_with_access_check->setAttributes(func_art_portable_find_static_method_from_code_with_access_check_PAL);
+
+Function* func_art_portable_find_direct_method_from_code_with_access_check = mod->getFunction("art_portable_find_direct_method_from_code_with_access_check");
+if (!func_art_portable_find_direct_method_from_code_with_access_check) {
+func_art_portable_find_direct_method_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_16,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_direct_method_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_find_direct_method_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_direct_method_from_code_with_access_check_PAL;
+func_art_portable_find_direct_method_from_code_with_access_check->setAttributes(func_art_portable_find_direct_method_from_code_with_access_check_PAL);
+
+Function* func_art_portable_find_virtual_method_from_code_with_access_check = mod->getFunction("art_portable_find_virtual_method_from_code_with_access_check");
+if (!func_art_portable_find_virtual_method_from_code_with_access_check) {
+func_art_portable_find_virtual_method_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_16,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_virtual_method_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_find_virtual_method_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_virtual_method_from_code_with_access_check_PAL;
+func_art_portable_find_virtual_method_from_code_with_access_check->setAttributes(func_art_portable_find_virtual_method_from_code_with_access_check_PAL);
+
+Function* func_art_portable_find_super_method_from_code_with_access_check = mod->getFunction("art_portable_find_super_method_from_code_with_access_check");
+if (!func_art_portable_find_super_method_from_code_with_access_check) {
+func_art_portable_find_super_method_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_16,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_super_method_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_find_super_method_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_super_method_from_code_with_access_check_PAL;
+func_art_portable_find_super_method_from_code_with_access_check->setAttributes(func_art_portable_find_super_method_from_code_with_access_check_PAL);
+
+Function* func_art_portable_find_interface_method_from_code_with_access_check = mod->getFunction("art_portable_find_interface_method_from_code_with_access_check");
+if (!func_art_portable_find_interface_method_from_code_with_access_check) {
+func_art_portable_find_interface_method_from_code_with_access_check = Function::Create(
+ /*Type=*/FuncTy_16,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_interface_method_from_code_with_access_check", mod); // (external, no body)
+func_art_portable_find_interface_method_from_code_with_access_check->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_interface_method_from_code_with_access_check_PAL;
+func_art_portable_find_interface_method_from_code_with_access_check->setAttributes(func_art_portable_find_interface_method_from_code_with_access_check_PAL);
+
+Function* func_art_portable_find_interface_method_from_code = mod->getFunction("art_portable_find_interface_method_from_code");
+if (!func_art_portable_find_interface_method_from_code) {
+func_art_portable_find_interface_method_from_code = Function::Create(
+ /*Type=*/FuncTy_16,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_find_interface_method_from_code", mod); // (external, no body)
+func_art_portable_find_interface_method_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_find_interface_method_from_code_PAL;
+func_art_portable_find_interface_method_from_code->setAttributes(func_art_portable_find_interface_method_from_code_PAL);
+
+Function* func_art_portable_initialize_static_storage_from_code = mod->getFunction("art_portable_initialize_static_storage_from_code");
+if (!func_art_portable_initialize_static_storage_from_code) {
+func_art_portable_initialize_static_storage_from_code = Function::Create(
+ /*Type=*/FuncTy_13,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_initialize_static_storage_from_code", mod); // (external, no body)
+func_art_portable_initialize_static_storage_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_initialize_static_storage_from_code_PAL;
+func_art_portable_initialize_static_storage_from_code->setAttributes(func_art_portable_initialize_static_storage_from_code_PAL);
+
+Function* func_art_portable_initialize_type_from_code = mod->getFunction("art_portable_initialize_type_from_code");
+if (!func_art_portable_initialize_type_from_code) {
+func_art_portable_initialize_type_from_code = Function::Create(
+ /*Type=*/FuncTy_13,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_initialize_type_from_code", mod); // (external, no body)
+func_art_portable_initialize_type_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_initialize_type_from_code_PAL;
+func_art_portable_initialize_type_from_code->setAttributes(func_art_portable_initialize_type_from_code_PAL);
+
+Function* func_art_portable_initialize_type_and_verify_access_from_code = mod->getFunction("art_portable_initialize_type_and_verify_access_from_code");
+if (!func_art_portable_initialize_type_and_verify_access_from_code) {
+func_art_portable_initialize_type_and_verify_access_from_code = Function::Create(
+ /*Type=*/FuncTy_13,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_initialize_type_and_verify_access_from_code", mod); // (external, no body)
+func_art_portable_initialize_type_and_verify_access_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_initialize_type_and_verify_access_from_code_PAL;
+func_art_portable_initialize_type_and_verify_access_from_code->setAttributes(func_art_portable_initialize_type_and_verify_access_from_code_PAL);
+
+Function* func_art_portable_resolve_string_from_code = mod->getFunction("art_portable_resolve_string_from_code");
+if (!func_art_portable_resolve_string_from_code) {
+func_art_portable_resolve_string_from_code = Function::Create(
+ /*Type=*/FuncTy_17,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_resolve_string_from_code", mod); // (external, no body)
+func_art_portable_resolve_string_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_resolve_string_from_code_PAL;
+func_art_portable_resolve_string_from_code->setAttributes(func_art_portable_resolve_string_from_code_PAL);
+
+Function* func_art_portable_set32_static_from_code = mod->getFunction("art_portable_set32_static_from_code");
+if (!func_art_portable_set32_static_from_code) {
+func_art_portable_set32_static_from_code = Function::Create(
+ /*Type=*/FuncTy_18,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set32_static_from_code", mod); // (external, no body)
+func_art_portable_set32_static_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set32_static_from_code_PAL;
+func_art_portable_set32_static_from_code->setAttributes(func_art_portable_set32_static_from_code_PAL);
+
+Function* func_art_portable_set64_static_from_code = mod->getFunction("art_portable_set64_static_from_code");
+if (!func_art_portable_set64_static_from_code) {
+func_art_portable_set64_static_from_code = Function::Create(
+ /*Type=*/FuncTy_19,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set64_static_from_code", mod); // (external, no body)
+func_art_portable_set64_static_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set64_static_from_code_PAL;
+func_art_portable_set64_static_from_code->setAttributes(func_art_portable_set64_static_from_code_PAL);
+
+Function* func_art_portable_set_obj_static_from_code = mod->getFunction("art_portable_set_obj_static_from_code");
+if (!func_art_portable_set_obj_static_from_code) {
+func_art_portable_set_obj_static_from_code = Function::Create(
+ /*Type=*/FuncTy_20,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set_obj_static_from_code", mod); // (external, no body)
+func_art_portable_set_obj_static_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set_obj_static_from_code_PAL;
+func_art_portable_set_obj_static_from_code->setAttributes(func_art_portable_set_obj_static_from_code_PAL);
+
+Function* func_art_portable_get32_static_from_code = mod->getFunction("art_portable_get32_static_from_code");
+if (!func_art_portable_get32_static_from_code) {
+func_art_portable_get32_static_from_code = Function::Create(
+ /*Type=*/FuncTy_21,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get32_static_from_code", mod); // (external, no body)
+func_art_portable_get32_static_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get32_static_from_code_PAL;
+func_art_portable_get32_static_from_code->setAttributes(func_art_portable_get32_static_from_code_PAL);
+
+Function* func_art_portable_get64_static_from_code = mod->getFunction("art_portable_get64_static_from_code");
+if (!func_art_portable_get64_static_from_code) {
+func_art_portable_get64_static_from_code = Function::Create(
+ /*Type=*/FuncTy_22,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get64_static_from_code", mod); // (external, no body)
+func_art_portable_get64_static_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get64_static_from_code_PAL;
+func_art_portable_get64_static_from_code->setAttributes(func_art_portable_get64_static_from_code_PAL);
+
+Function* func_art_portable_get_obj_static_from_code = mod->getFunction("art_portable_get_obj_static_from_code");
+if (!func_art_portable_get_obj_static_from_code) {
+func_art_portable_get_obj_static_from_code = Function::Create(
+ /*Type=*/FuncTy_23,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get_obj_static_from_code", mod); // (external, no body)
+func_art_portable_get_obj_static_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get_obj_static_from_code_PAL;
+func_art_portable_get_obj_static_from_code->setAttributes(func_art_portable_get_obj_static_from_code_PAL);
+
+Function* func_art_portable_set32_instance_from_code = mod->getFunction("art_portable_set32_instance_from_code");
+if (!func_art_portable_set32_instance_from_code) {
+func_art_portable_set32_instance_from_code = Function::Create(
+ /*Type=*/FuncTy_24,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set32_instance_from_code", mod); // (external, no body)
+func_art_portable_set32_instance_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set32_instance_from_code_PAL;
+func_art_portable_set32_instance_from_code->setAttributes(func_art_portable_set32_instance_from_code_PAL);
+
+Function* func_art_portable_set64_instance_from_code = mod->getFunction("art_portable_set64_instance_from_code");
+if (!func_art_portable_set64_instance_from_code) {
+func_art_portable_set64_instance_from_code = Function::Create(
+ /*Type=*/FuncTy_25,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set64_instance_from_code", mod); // (external, no body)
+func_art_portable_set64_instance_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set64_instance_from_code_PAL;
+func_art_portable_set64_instance_from_code->setAttributes(func_art_portable_set64_instance_from_code_PAL);
+
+Function* func_art_portable_set_obj_instance_from_code = mod->getFunction("art_portable_set_obj_instance_from_code");
+if (!func_art_portable_set_obj_instance_from_code) {
+func_art_portable_set_obj_instance_from_code = Function::Create(
+ /*Type=*/FuncTy_26,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_set_obj_instance_from_code", mod); // (external, no body)
+func_art_portable_set_obj_instance_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_set_obj_instance_from_code_PAL;
+func_art_portable_set_obj_instance_from_code->setAttributes(func_art_portable_set_obj_instance_from_code_PAL);
+
+Function* func_art_portable_get32_instance_from_code = mod->getFunction("art_portable_get32_instance_from_code");
+if (!func_art_portable_get32_instance_from_code) {
+func_art_portable_get32_instance_from_code = Function::Create(
+ /*Type=*/FuncTy_20,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get32_instance_from_code", mod); // (external, no body)
+func_art_portable_get32_instance_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get32_instance_from_code_PAL;
+func_art_portable_get32_instance_from_code->setAttributes(func_art_portable_get32_instance_from_code_PAL);
+
+Function* func_art_portable_get64_instance_from_code = mod->getFunction("art_portable_get64_instance_from_code");
+if (!func_art_portable_get64_instance_from_code) {
+func_art_portable_get64_instance_from_code = Function::Create(
+ /*Type=*/FuncTy_27,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get64_instance_from_code", mod); // (external, no body)
+func_art_portable_get64_instance_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get64_instance_from_code_PAL;
+func_art_portable_get64_instance_from_code->setAttributes(func_art_portable_get64_instance_from_code_PAL);
+
+Function* func_art_portable_get_obj_instance_from_code = mod->getFunction("art_portable_get_obj_instance_from_code");
+if (!func_art_portable_get_obj_instance_from_code) {
+func_art_portable_get_obj_instance_from_code = Function::Create(
+ /*Type=*/FuncTy_13,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_get_obj_instance_from_code", mod); // (external, no body)
+func_art_portable_get_obj_instance_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_get_obj_instance_from_code_PAL;
+func_art_portable_get_obj_instance_from_code->setAttributes(func_art_portable_get_obj_instance_from_code_PAL);
+
+Function* func_art_portable_decode_jobject_in_thread = mod->getFunction("art_portable_decode_jobject_in_thread");
+if (!func_art_portable_decode_jobject_in_thread) {
+func_art_portable_decode_jobject_in_thread = Function::Create(
+ /*Type=*/FuncTy_28,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_decode_jobject_in_thread", mod); // (external, no body)
+func_art_portable_decode_jobject_in_thread->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_decode_jobject_in_thread_PAL;
+func_art_portable_decode_jobject_in_thread->setAttributes(func_art_portable_decode_jobject_in_thread_PAL);
+
+Function* func_art_portable_fill_array_data_from_code = mod->getFunction("art_portable_fill_array_data_from_code");
+if (!func_art_portable_fill_array_data_from_code) {
+func_art_portable_fill_array_data_from_code = Function::Create(
+ /*Type=*/FuncTy_29,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_fill_array_data_from_code", mod); // (external, no body)
+func_art_portable_fill_array_data_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_fill_array_data_from_code_PAL;
+func_art_portable_fill_array_data_from_code->setAttributes(func_art_portable_fill_array_data_from_code_PAL);
+
+Function* func_art_portable_is_assignable_from_code = mod->getFunction("art_portable_is_assignable_from_code");
+if (!func_art_portable_is_assignable_from_code) {
+func_art_portable_is_assignable_from_code = Function::Create(
+ /*Type=*/FuncTy_30,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_is_assignable_from_code", mod); // (external, no body)
+func_art_portable_is_assignable_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_is_assignable_from_code_PAL;
+func_art_portable_is_assignable_from_code->setAttributes(func_art_portable_is_assignable_from_code_PAL);
+
+Function* func_art_portable_check_cast_from_code = mod->getFunction("art_portable_check_cast_from_code");
+if (!func_art_portable_check_cast_from_code) {
+func_art_portable_check_cast_from_code = Function::Create(
+ /*Type=*/FuncTy_5,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_check_cast_from_code", mod); // (external, no body)
+func_art_portable_check_cast_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_check_cast_from_code_PAL;
+func_art_portable_check_cast_from_code->setAttributes(func_art_portable_check_cast_from_code_PAL);
+
+Function* func_art_portable_check_put_array_element_from_code = mod->getFunction("art_portable_check_put_array_element_from_code");
+if (!func_art_portable_check_put_array_element_from_code) {
+func_art_portable_check_put_array_element_from_code = Function::Create(
+ /*Type=*/FuncTy_5,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_check_put_array_element_from_code", mod); // (external, no body)
+func_art_portable_check_put_array_element_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_check_put_array_element_from_code_PAL;
+func_art_portable_check_put_array_element_from_code->setAttributes(func_art_portable_check_put_array_element_from_code_PAL);
+
+Function* func_art_d2l = mod->getFunction("art_d2l");
+if (!func_art_d2l) {
+func_art_d2l = Function::Create(
+ /*Type=*/FuncTy_31,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_d2l", mod); // (external, no body)
+func_art_d2l->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_d2l_PAL;
+func_art_d2l->setAttributes(func_art_d2l_PAL);
+
+Function* func_art_d2i = mod->getFunction("art_d2i");
+if (!func_art_d2i) {
+func_art_d2i = Function::Create(
+ /*Type=*/FuncTy_32,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_d2i", mod); // (external, no body)
+func_art_d2i->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_d2i_PAL;
+func_art_d2i->setAttributes(func_art_d2i_PAL);
+
+Function* func_art_f2l = mod->getFunction("art_f2l");
+if (!func_art_f2l) {
+func_art_f2l = Function::Create(
+ /*Type=*/FuncTy_33,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_f2l", mod); // (external, no body)
+func_art_f2l->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_f2l_PAL;
+func_art_f2l->setAttributes(func_art_f2l_PAL);
+
+Function* func_art_f2i = mod->getFunction("art_f2i");
+if (!func_art_f2i) {
+func_art_f2i = Function::Create(
+ /*Type=*/FuncTy_34,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_f2i", mod); // (external, no body)
+func_art_f2i->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_f2i_PAL;
+func_art_f2i->setAttributes(func_art_f2i_PAL);
+
+Function* func_art_portable_jni_method_start = mod->getFunction("art_portable_jni_method_start");
+if (!func_art_portable_jni_method_start) {
+func_art_portable_jni_method_start = Function::Create(
+ /*Type=*/FuncTy_35,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_jni_method_start", mod); // (external, no body)
+func_art_portable_jni_method_start->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_jni_method_start_PAL;
+func_art_portable_jni_method_start->setAttributes(func_art_portable_jni_method_start_PAL);
+
+Function* func_art_portable_jni_method_start_synchronized = mod->getFunction("art_portable_jni_method_start_synchronized");
+if (!func_art_portable_jni_method_start_synchronized) {
+func_art_portable_jni_method_start_synchronized = Function::Create(
+ /*Type=*/FuncTy_30,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_jni_method_start_synchronized", mod); // (external, no body)
+func_art_portable_jni_method_start_synchronized->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_jni_method_start_synchronized_PAL;
+func_art_portable_jni_method_start_synchronized->setAttributes(func_art_portable_jni_method_start_synchronized_PAL);
+
+Function* func_art_portable_jni_method_end = mod->getFunction("art_portable_jni_method_end");
+if (!func_art_portable_jni_method_end) {
+func_art_portable_jni_method_end = Function::Create(
+ /*Type=*/FuncTy_15,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_jni_method_end", mod); // (external, no body)
+func_art_portable_jni_method_end->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_jni_method_end_PAL;
+func_art_portable_jni_method_end->setAttributes(func_art_portable_jni_method_end_PAL);
+
+Function* func_art_portable_jni_method_end_synchronized = mod->getFunction("art_portable_jni_method_end_synchronized");
+if (!func_art_portable_jni_method_end_synchronized) {
+func_art_portable_jni_method_end_synchronized = Function::Create(
+ /*Type=*/FuncTy_36,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_jni_method_end_synchronized", mod); // (external, no body)
+func_art_portable_jni_method_end_synchronized->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_jni_method_end_synchronized_PAL;
+func_art_portable_jni_method_end_synchronized->setAttributes(func_art_portable_jni_method_end_synchronized_PAL);
+
+Function* func_art_portable_jni_method_end_with_reference = mod->getFunction("art_portable_jni_method_end_with_reference");
+if (!func_art_portable_jni_method_end_with_reference) {
+func_art_portable_jni_method_end_with_reference = Function::Create(
+ /*Type=*/FuncTy_37,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_jni_method_end_with_reference", mod); // (external, no body)
+func_art_portable_jni_method_end_with_reference->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_jni_method_end_with_reference_PAL;
+func_art_portable_jni_method_end_with_reference->setAttributes(func_art_portable_jni_method_end_with_reference_PAL);
+
+Function* func_art_portable_jni_method_end_with_reference_synchronized = mod->getFunction("art_portable_jni_method_end_with_reference_synchronized");
+if (!func_art_portable_jni_method_end_with_reference_synchronized) {
+func_art_portable_jni_method_end_with_reference_synchronized = Function::Create(
+ /*Type=*/FuncTy_38,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_jni_method_end_with_reference_synchronized", mod); // (external, no body)
+func_art_portable_jni_method_end_with_reference_synchronized->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_jni_method_end_with_reference_synchronized_PAL;
+func_art_portable_jni_method_end_with_reference_synchronized->setAttributes(func_art_portable_jni_method_end_with_reference_synchronized_PAL);
+
+Function* func_art_portable_is_exception_pending_from_code = mod->getFunction("art_portable_is_exception_pending_from_code");
+if (!func_art_portable_is_exception_pending_from_code) {
+func_art_portable_is_exception_pending_from_code = Function::Create(
+ /*Type=*/FuncTy_39,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_is_exception_pending_from_code", mod); // (external, no body)
+func_art_portable_is_exception_pending_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_is_exception_pending_from_code_PAL;
+func_art_portable_is_exception_pending_from_code->setAttributes(func_art_portable_is_exception_pending_from_code_PAL);
+
+Function* func_art_portable_mark_gc_card_from_code = mod->getFunction("art_portable_mark_gc_card_from_code");
+if (!func_art_portable_mark_gc_card_from_code) {
+func_art_portable_mark_gc_card_from_code = Function::Create(
+ /*Type=*/FuncTy_5,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_mark_gc_card_from_code", mod); // (external, no body)
+func_art_portable_mark_gc_card_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_mark_gc_card_from_code_PAL;
+func_art_portable_mark_gc_card_from_code->setAttributes(func_art_portable_mark_gc_card_from_code_PAL);
+
+Function* func_art_portable_proxy_invoke_handler_from_code = mod->getFunction("art_portable_proxy_invoke_handler_from_code");
+if (!func_art_portable_proxy_invoke_handler_from_code) {
+func_art_portable_proxy_invoke_handler_from_code = Function::Create(
+ /*Type=*/FuncTy_40,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"art_portable_proxy_invoke_handler_from_code", mod); // (external, no body)
+func_art_portable_proxy_invoke_handler_from_code->setCallingConv(CallingConv::C);
+}
+AttributeSet func_art_portable_proxy_invoke_handler_from_code_PAL;
+func_art_portable_proxy_invoke_handler_from_code->setAttributes(func_art_portable_proxy_invoke_handler_from_code_PAL);
+
+// Global Variable Declarations
+
+
+// Constant Definitions
+
+// Global Variable Definitions
+
+// Function Definitions
+
+return mod;
+
+}
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/intrinsic_func_list.def b/compiler/llvm/intrinsic_func_list.def
new file mode 100644
index 0000000..92537ba
--- /dev/null
+++ b/compiler/llvm/intrinsic_func_list.def
@@ -0,0 +1,1803 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE,
+//                     ARG1_TYPE, ARG2_TYPE, ARG3_TYPE, ARG4_TYPE, ARG5_TYPE)
+#ifndef DEF_INTRINSICS_FUNC
+#  error "missing DEF_INTRINSICS_FUNC definition!"
+#endif
+
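+// Illustrative usage sketch (not part of this file): a consumer defines
+// DEF_INTRINSICS_FUNC before including this .def file, X-macro style, e.g. to
+// enumerate intrinsic IDs.  The enum and constant names below are hypothetical.
+//
+//   enum HypotheticalIntrinsicId {
+//   #define DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE, A1, A2, A3, A4, A5) ID,
+//   #include "intrinsic_func_list.def"
+//   #undef DEF_INTRINSICS_FUNC
+//     kNumHypotheticalIntrinsicIds
+//   };
+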
+#define _EVAL_DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE, ...) \
+    DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE, __VA_ARGS__)
+
+#define _EXPAND_ARG0()                         kNone, kNone, kNone, kNone, kNone
+#define _EXPAND_ARG1(ARG1)                      ARG1, kNone, kNone, kNone, kNone
+#define _EXPAND_ARG2(ARG1, ARG2)                ARG1,  ARG2, kNone, kNone, kNone
+#define _EXPAND_ARG3(ARG1, ARG2, ARG3)          ARG1,  ARG2,  ARG3, kNone, kNone
+#define _EXPAND_ARG4(ARG1, ARG2, ARG3, ARG4)    ARG1,  ARG2,  ARG3,  ARG4, kNone
+#define _EXPAND_ARG5(ARG1, ARG2, ARG3, ARG4, ARG5) \
+                                                ARG1,  ARG2,  ARG3,  ARG4,  ARG5
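+
+// For example, _EXPAND_ARG2(kInt32Ty, kJavaObjectTy) pads the argument list to
+// the five fixed slots that DEF_INTRINSICS_FUNC expects:
+//   kInt32Ty, kJavaObjectTy, kNone, kNone, kNone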
+
+#define _JTYPE(TYPE, SPACE) _JTYPE_OF_ ## TYPE ## _UNDER_ ## SPACE
+
+// Note: These should be consistent with the type returned by
+// IRBuilder::GetJType([type], kArray).
+#define _JTYPE_OF_kInt1Ty_UNDER_kArray        kInt8Ty
+#define _JTYPE_OF_kInt8Ty_UNDER_kArray        kInt8Ty
+#define _JTYPE_OF_kInt16Ty_UNDER_kArray       kInt16Ty
+#define _JTYPE_OF_kInt32Ty_UNDER_kArray       kInt32Ty
+#define _JTYPE_OF_kInt64Ty_UNDER_kArray       kInt64Ty
+#define _JTYPE_OF_kJavaObjectTy_UNDER_kArray  kJavaObjectTy
+
+// Note: These should be consistent with the type returned by
+// IRBuilder::GetJType([type], kField).
+#define _JTYPE_OF_kInt1Ty_UNDER_kField        kInt32Ty
+#define _JTYPE_OF_kInt8Ty_UNDER_kField        kInt32Ty
+#define _JTYPE_OF_kInt16Ty_UNDER_kField       kInt32Ty
+#define _JTYPE_OF_kInt32Ty_UNDER_kField       kInt32Ty
+#define _JTYPE_OF_kInt64Ty_UNDER_kField       kInt64Ty
+#define _JTYPE_OF_kJavaObjectTy_UNDER_kField  kJavaObjectTy
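+
+// For example, _JTYPE(kInt8Ty, kField) pastes to _JTYPE_OF_kInt8Ty_UNDER_kField,
+// which the table above maps to kInt32Ty (sub-word fields are widened to 32
+// bits), whereas _JTYPE(kInt8Ty, kArray) maps to kInt8Ty.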
+
+//----------------------------------------------------------------------------
+// Thread
+//----------------------------------------------------------------------------
+
+// Thread* art_portable_get_current_thread()
+_EVAL_DEF_INTRINSICS_FUNC(GetCurrentThread,
+                          art_portable_get_current_thread,
+                          kAttrReadNone | kAttrNoThrow,
+                          kJavaThreadTy,
+                          _EXPAND_ARG0())
+
+// void art_portable_test_suspend(Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(TestSuspend,
+                          art_portable_test_suspend,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG1(kJavaThreadTy))
+
+// void art_portable_check_suspend() /* Expands to GetCurrentThread/TestSuspend */
+_EVAL_DEF_INTRINSICS_FUNC(CheckSuspend,
+                          art_portable_check_suspend,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG0())
+
+// void art_portable_mark_gc_card(Object* new_value, Object* object)
+_EVAL_DEF_INTRINSICS_FUNC(MarkGCCard,
+                          art_portable_mark_gc_card,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
+
+//----------------------------------------------------------------------------
+// Exception
+//----------------------------------------------------------------------------
+
+// Should not be expanded - this intrinsic introduces the catch targets for a
+// potentially throwing instruction.  The result is a switch key, and this
+// instruction will be followed by a switch statement.  The catch targets will
+// be enumerated as cases of the switch, with the fallthrough designating the
+// block containing the potentially throwing instruction.
+// int art_portable_catch_targets(int dex_pc)
+_EVAL_DEF_INTRINSICS_FUNC(CatchTargets,
+                          art_portable_catch_targets,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32ConstantTy))
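+
+// A rough sketch of the shape described above (hypothetical IR, for
+// illustration only):
+//   %key = call i32 @art_portable_catch_targets(i32 <dex_pc>)
+//   switch i32 %key, label %fallthrough [ i32 0, label %catch_0
+//                                         i32 1, label %catch_1 ]
+// where %fallthrough holds the potentially throwing instruction.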
+
+// void art_portable_throw_exception(JavaObject* exception)
+_EVAL_DEF_INTRINSICS_FUNC(ThrowException,
+                          art_portable_throw_exception,
+                          kAttrDoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG1(kJavaObjectTy))
+
+// void art_portable_hl_throw_exception(JavaObject* exception)
+_EVAL_DEF_INTRINSICS_FUNC(HLThrowException,
+                          art_portable_hl_throw_exception,
+                          kAttrDoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG1(kJavaObjectTy))
+
+// JavaObject* art_portable_get_current_exception()
+_EVAL_DEF_INTRINSICS_FUNC(GetException,
+                          art_portable_get_current_exception,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG0())
+
+// bool art_portable_is_exception_pending()
+_EVAL_DEF_INTRINSICS_FUNC(IsExceptionPending,
+                          art_portable_is_exception_pending,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt1Ty,
+                          _EXPAND_ARG0())
+
+// int art_portable_find_catch_block(Method* method, int try_item_offset)
+_EVAL_DEF_INTRINSICS_FUNC(FindCatchBlock,
+                          art_portable_find_catch_block,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kInt32ConstantTy))
+
+// void art_portable_throw_div_zero()
+_EVAL_DEF_INTRINSICS_FUNC(ThrowDivZeroException,
+                          art_portable_throw_div_zero,
+                          kAttrDoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG0())
+
+// void art_portable_throw_null_pointer_exception(uint32_t dex_pc)
+_EVAL_DEF_INTRINSICS_FUNC(ThrowNullPointerException,
+                          art_portable_throw_null_pointer_exception,
+                          kAttrDoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// void art_portable_throw_array_bounds(int index, int array_len)
+_EVAL_DEF_INTRINSICS_FUNC(ThrowIndexOutOfBounds,
+                          art_portable_throw_array_bounds,
+                          kAttrDoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+//----------------------------------------------------------------------------
+// ConstString
+//----------------------------------------------------------------------------
+
+// JavaObject* art_portable_const_string(uint32_t string_idx)
+_EVAL_DEF_INTRINSICS_FUNC(ConstString,
+                          art_portable_const_string,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// JavaObject* art_portable_load_string_from_dex_cache(uint32_t string_idx)
+_EVAL_DEF_INTRINSICS_FUNC(LoadStringFromDexCache,
+                          art_portable_load_string_from_dex_cache,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// JavaObject* art_portable_resolve_string(Method* method, uint32_t string_idx)
+_EVAL_DEF_INTRINSICS_FUNC(ResolveString,
+                          art_portable_resolve_string,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG2(kJavaMethodTy, kInt32ConstantTy))
+
+//----------------------------------------------------------------------------
+// ConstClass
+//----------------------------------------------------------------------------
+
+// JavaObject* art_portable_const_class(uint32_t type_idx)
+_EVAL_DEF_INTRINSICS_FUNC(ConstClass,
+                          art_portable_const_class,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// JavaObject* art_portable_initialize_type_and_verify_access(uint32_t type_idx,
+//                                                        Method* referrer,
+//                                                        Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(InitializeTypeAndVerifyAccess,
+                          art_portable_initialize_type_and_verify_access,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
+
+// JavaObject* art_portable_load_type_from_dex_cache(uint32_t type_idx)
+_EVAL_DEF_INTRINSICS_FUNC(LoadTypeFromDexCache,
+                          art_portable_load_type_from_dex_cache,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// JavaObject* art_portable_initialize_type(uint32_t type_idx,
+//                                      Method* referrer,
+//                                      Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(InitializeType,
+                          art_portable_initialize_type,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
+
+//----------------------------------------------------------------------------
+// Lock
+//----------------------------------------------------------------------------
+
+// void art_portable_lock_object(JavaObject* obj, Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(LockObject,
+                          art_portable_lock_object,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kJavaObjectTy, kJavaThreadTy))
+
+// void art_portable_unlock_object(JavaObject* obj, Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(UnlockObject,
+                          art_portable_unlock_object,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG2(kJavaObjectTy, kJavaThreadTy))
+
+//----------------------------------------------------------------------------
+// Cast
+//----------------------------------------------------------------------------
+
+// void art_portable_check_cast(JavaObject* dest_type, JavaObject* src_type)
+_EVAL_DEF_INTRINSICS_FUNC(CheckCast,
+                          art_portable_check_cast,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
+
+// void art_portable_hl_check_cast(uint32_t type_idx, JavaObject* obj)
+_EVAL_DEF_INTRINSICS_FUNC(HLCheckCast,
+                          art_portable_hl_check_cast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaObjectTy))
+
+// int art_portable_is_assignable(JavaObject* dest_type, JavaObject* src_type)
+_EVAL_DEF_INTRINSICS_FUNC(IsAssignable,
+                          art_portable_is_assignable,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
+
+//----------------------------------------------------------------------------
+// Allocation
+//----------------------------------------------------------------------------
+
+// JavaObject* art_portable_alloc_object(uint32_t type_idx,
+//                                   Method* referrer,
+//                                   Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(AllocObject,
+                          art_portable_alloc_object,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
+
+// JavaObject* art_portable_alloc_object_with_access_check(uint32_t type_idx,
+//                                                     Method* referrer,
+//                                                     Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(AllocObjectWithAccessCheck,
+                          art_portable_alloc_object_with_access_check,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
+
+//----------------------------------------------------------------------------
+// Instance
+//----------------------------------------------------------------------------
+
+// JavaObject* art_portable_new_instance(uint32_t type_idx)
+_EVAL_DEF_INTRINSICS_FUNC(NewInstance,
+                          art_portable_new_instance,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// bool art_portable_instance_of(uint32_t type_idx, JavaObject* ref)
+_EVAL_DEF_INTRINSICS_FUNC(InstanceOf,
+                          art_portable_instance_of,
+                          kAttrNone,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
+
+//----------------------------------------------------------------------------
+// Array
+//----------------------------------------------------------------------------
+
+// JavaObject* art_portable_new_array(uint32_t type_idx, uint32_t array_size)
+_EVAL_DEF_INTRINSICS_FUNC(NewArray,
+                          art_portable_new_array,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG2(kInt32ConstantTy, kInt32Ty))
+
+// uint32_t art_portable_opt_array_length(int32_t opt_flags, JavaObject* array)
+_EVAL_DEF_INTRINSICS_FUNC(OptArrayLength,
+                          art_portable_opt_array_length,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
+
+// uint32_t art_portable_array_length(JavaObject* array)
+_EVAL_DEF_INTRINSICS_FUNC(ArrayLength,
+                          art_portable_array_length,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kJavaObjectTy))
+
+// JavaObject* art_portable_alloc_array(uint32_t type_idx,
+//                                  Method* referrer,
+//                                  uint32_t length,
+//                                  Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(AllocArray,
+                          art_portable_alloc_array,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32Ty, kJavaThreadTy))
+
+// JavaObject* art_portable_alloc_array_with_access_check(uint32_t type_idx,
+//                                                    Method* referrer,
+//                                                    uint32_t length,
+//                                                    Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(AllocArrayWithAccessCheck,
+                          art_portable_alloc_array_with_access_check,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32Ty, kJavaThreadTy))
+
+// JavaObject* art_portable_check_and_alloc_array(uint32_t type_idx,
+//                                            Method* referrer,
+//                                            uint32_t length,
+//                                            Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(CheckAndAllocArray,
+                          art_portable_check_and_alloc_array,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32ConstantTy, kJavaThreadTy))
+
+// JavaObject* art_portable_check_and_alloc_array_with_access_check(uint32_t type_idx,
+//                                                              Method* referrer,
+//                                                              uint32_t length,
+//                                                              Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(CheckAndAllocArrayWithAccessCheck,
+                          art_portable_check_and_alloc_array_with_access_check,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32ConstantTy, kJavaThreadTy))
+
+// art_portable_aget_* and art_portable_aput_* never generate exceptions, since
+// the necessary checks on the arguments (e.g., the array and index) have
+// already been performed before these intrinsics are invoked.
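+//
+// For illustration only (not a definition): roughly the check sequence a
+// caller is assumed to emit first, using the throw intrinsics defined above:
+//
+//   if (array == NULL) art_portable_throw_null_pointer_exception(dex_pc);
+//   len = art_portable_array_length(array);
+//   if (index >= len)  art_portable_throw_array_bounds(index, len);
+//   value = art_portable_aget(array, index);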
+//
+// [type] art_portable_aget_[type](JavaObject* array, uint32_t index)
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGet,
+                          art_portable_aget,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt32Ty, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGetWide,
+                          art_portable_aget_wide,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt64Ty, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGetObject,
+                          art_portable_aget_object,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kJavaObjectTy, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGetBoolean,
+                          art_portable_aget_boolean,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt1Ty, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGetByte,
+                          art_portable_aget_byte,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt8Ty, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGetChar,
+                          art_portable_aget_char,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt16Ty, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayGetShort,
+                          art_portable_aget_short,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt16Ty, kArray),
+                          _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
+
+// void art_portable_aput_[type]([type] value, JavaObject* array, uint32_t index)
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPut,
+                          art_portable_aput,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kInt32Ty, kArray), kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPutWide,
+                          art_portable_aput_wide,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kInt64Ty, kArray), kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPutObject,
+                          art_portable_aput_object,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kJavaObjectTy, kArray), kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPutBoolean,
+                          art_portable_aput_boolean,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kInt1Ty, kArray), kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPutByte,
+                          art_portable_aput_byte,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kInt8Ty, kArray), kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPutChar,
+                          art_portable_aput_char,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kInt16Ty, kArray), kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(ArrayPutShort,
+                          art_portable_aput_short,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(_JTYPE(kInt16Ty, kArray), kJavaObjectTy, kInt32Ty))
+
+// void art_portable_check_put_array_element(JavaObject* value, JavaObject* array)
+_EVAL_DEF_INTRINSICS_FUNC(CheckPutArrayElement,
+                          art_portable_check_put_array_element,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
+
+// void art_portable_filled_new_array(Array* array,
+//                                uint32_t elem_jty, ...)
+_EVAL_DEF_INTRINSICS_FUNC(FilledNewArray,
+                          art_portable_filled_new_array,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kVarArgTy))
+
+// void art_portable_fill_array_data(Method* referrer,
+//                               uint32_t dex_pc,
+//                               Array* array,
+//                               uint32_t payload_offset)
+_EVAL_DEF_INTRINSICS_FUNC(FillArrayData,
+                          art_portable_fill_array_data,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaMethodTy, kInt32ConstantTy, kJavaObjectTy, kInt32ConstantTy))
+
+// void art_portable_hl_fill_array_data(int32_t offset, JavaObject* array)
+_EVAL_DEF_INTRINSICS_FUNC(HLFillArrayData,
+                          art_portable_hl_fill_array_data,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaObjectTy))
+
+//----------------------------------------------------------------------------
+// Instance Field
+//----------------------------------------------------------------------------
+
+// [type] art_portable_iget_[type](uint32_t field_idx,
+//                             Method* referrer,
+//                             JavaObject* obj)
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGet,
+                          art_portable_iget,
+                          kAttrNone,
+                          _JTYPE(kInt32Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetWide,
+                          art_portable_iget_wide,
+                          kAttrNone,
+                          _JTYPE(kInt64Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetObject,
+                          art_portable_iget_object,
+                          kAttrNone,
+                          _JTYPE(kJavaObjectTy, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetBoolean,
+                          art_portable_iget_boolean,
+                          kAttrNone,
+                          _JTYPE(kInt1Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetByte,
+                          art_portable_iget_byte,
+                          kAttrNone,
+                          _JTYPE(kInt8Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetChar,
+                          art_portable_iget_char,
+                          kAttrNone,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetShort,
+                          art_portable_iget_short,
+                          kAttrNone,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
+
+// [type] art_portable_iget_[type].fast(int field_offset,
+//                                  bool is_volatile,
+//                                  JavaObject* obj)
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetFast,
+                          art_portable_iget.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt32Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetWideFast,
+                          art_portable_iget_wide.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt64Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetObjectFast,
+                          art_portable_iget_object.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kJavaObjectTy, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetBooleanFast,
+                          art_portable_iget_boolean.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt1Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetByteFast,
+                          art_portable_iget_byte.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt8Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetCharFast,
+                          art_portable_iget_char.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetShortFast,
+                          art_portable_iget_short.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
+
+// void art_portable_iput_[type](uint32_t field_idx,
+//                           Method* referrer,
+//                           JavaObject* obj,
+//                           [type] new_value)
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPut,
+                          art_portable_iput,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt32Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutWide,
+                          art_portable_iput_wide,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt64Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutObject,
+                          art_portable_iput_object,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kJavaObjectTy, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutBoolean,
+                          art_portable_iput_boolean,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt1Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutByte,
+                          art_portable_iput_byte,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt8Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutChar,
+                          art_portable_iput_char,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutShort,
+                          art_portable_iput_short,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
+
+// void art_portable_iput_[type].fast(int field_offset,
+//                                bool is_volatile,
+//                                JavaObject* obj,
+//                                [type] new_value)
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutFast,
+                          art_portable_iput.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt32Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutWideFast,
+                          art_portable_iput_wide.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt64Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutObjectFast,
+                          art_portable_iput_object.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kJavaObjectTy, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutBooleanFast,
+                          art_portable_iput_boolean.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt1Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutByteFast,
+                          art_portable_iput_byte.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt8Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutCharFast,
+                          art_portable_iput_char.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutShortFast,
+                          art_portable_iput_short.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
+
+//----------------------------------------------------------------------------
+// Static Field
+//----------------------------------------------------------------------------
+
+// [type] art_portable_sget_[type](uint32_t field_idx, Method* referrer)
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGet,
+                          art_portable_sget,
+                          kAttrNone,
+                          _JTYPE(kInt32Ty, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetWide,
+                          art_portable_sget_wide,
+                          kAttrNone,
+                          _JTYPE(kInt64Ty, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetObject,
+                          art_portable_sget_object,
+                          kAttrNone,
+                          _JTYPE(kJavaObjectTy, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetBoolean,
+                          art_portable_sget_boolean,
+                          kAttrNone,
+                          _JTYPE(kInt1Ty, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetByte,
+                          art_portable_sget_byte,
+                          kAttrNone,
+                          _JTYPE(kInt8Ty, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetChar,
+                          art_portable_sget_char,
+                          kAttrNone,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetShort,
+                          art_portable_sget_short,
+                          kAttrNone,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
+
+// [type] art_portable_sget_[type].fast(JavaObject* ssb,
+//                                  int field_offset,
+//                                  bool is_volatile)
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetFast,
+                          art_portable_sget.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt32Ty, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetWideFast,
+                          art_portable_sget_wide.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt64Ty, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetObjectFast,
+                          art_portable_sget_object.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kJavaObjectTy, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetBooleanFast,
+                          art_portable_sget_boolean.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt1Ty, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetByteFast,
+                          art_portable_sget_byte.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt8Ty, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetCharFast,
+                          art_portable_sget_char.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetShortFast,
+                          art_portable_sget_short.fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          _JTYPE(kInt16Ty, kField),
+                          _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
+
+// void art_portable_sput_[type](uint32_t field_idx,
+//                           Method* referrer,
+//                           [type] new_value)
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPut,
+                          art_portable_sput,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt32Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutWide,
+                          art_portable_sput_wide,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt64Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutObject,
+                          art_portable_sput_object,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kJavaObjectTy, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutBoolean,
+                          art_portable_sput_boolean,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt1Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutByte,
+                          art_portable_sput_byte,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt8Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutChar,
+                          art_portable_sput_char,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt16Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutShort,
+                          art_portable_sput_short,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt16Ty, kField)))
+
+// void art_portable_sput_[type].fast(JavaObject* ssb,
+//                                int field_offset,
+//                                bool is_volatile,
+//                                [type] new_value)
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutFast,
+                          art_portable_sput.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt32Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutWideFast,
+                          art_portable_sput_wide.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt64Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutObjectFast,
+                          art_portable_sput_object.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kJavaObjectTy, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutBooleanFast,
+                          art_portable_sput_boolean.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt1Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutByteFast,
+                          art_portable_sput_byte.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt8Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutCharFast,
+                          art_portable_sput_char.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt16Ty, kField)))
+
+_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutShortFast,
+                          art_portable_sput_short.fast,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt16Ty, kField)))
+
+// JavaObject* art_portable_load_declaring_class_ssb(Method* method)
+// Load the static storage base of the class in which the given method resides
+_EVAL_DEF_INTRINSICS_FUNC(LoadDeclaringClassSSB,
+                          art_portable_load_declaring_class_ssb,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kJavaMethodTy))
+
+// JavaObject* art_portable_load_class_ssb_from_dex_cache(uint32_t type_idx)
+_EVAL_DEF_INTRINSICS_FUNC(LoadClassSSBFromDexCache,
+                          art_portable_load_class_ssb_from_dex_cache,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// JavaObject* art_portable_init_and_load_class_ssb(uint32_t type_idx,
+//                                              Method* referrer,
+//                                              Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(InitializeAndLoadClassSSB,
+                          art_portable_init_and_load_class_ssb,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
+
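+// For illustration only, a rough sketch of how the *.fast static field
+// accessors above combine with an SSB load (field_offset is illustrative):
+//
+//   JavaObject* ssb = art_portable_load_declaring_class_ssb(referrer);
+//   value = art_portable_sget.fast(ssb, field_offset, false /* is_volatile */);
+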
+//----------------------------------------------------------------------------
+// High-level Array get/put
+//
+// Similar to art_portable_aget/aput_xxx, but the checks have not yet been
+// performed.  OptFlags carries info describing whether the frontend has
+// determined that the null check and/or array bounds check may be skipped.
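+//
+// For illustration only, a rough sketch of the intended expansion (flag names,
+// e.g. MIR_IGNORE_NULL_CHECK, are illustrative):
+//
+//   value = art_portable_hl_aget(optFlags, array, index);
+//     ==> if (!(optFlags & MIR_IGNORE_NULL_CHECK))  <emit null check on array>
+//         if (!(optFlags & MIR_IGNORE_RANGE_CHECK)) <emit bounds check on index>
+//         value = art_portable_aget(array, index);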
+//
+// [type] art_portable_hl_aget_[type](int optFlags, JavaObject* array, uint32_t index)
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGet,
+                          art_portable_hl_aget,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetFloat,
+                          art_portable_hl_aget_float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kFloatTy,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetWide,
+                          art_portable_hl_aget_wide,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetDouble,
+                          art_portable_hl_aget_double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kDoubleTy,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetObject,
+                          art_portable_hl_aget_object,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetBoolean,
+                          art_portable_hl_aget_boolean,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetByte,
+                          art_portable_hl_aget_byte,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetChar,
+                          art_portable_hl_aget_char,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetShort,
+                          art_portable_hl_aget_short,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+// void art_portable_hl_aput_[type](int optFlags, [type] value, JavaObject* array, uint32_t index)
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPut,
+                          art_portable_hl_aput,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutFloat,
+                          art_portable_hl_aput_float,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kFloatTy, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutWide,
+                          art_portable_hl_aput_wide,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt64Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutDouble,
+                          art_portable_hl_aput_double,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kDoubleTy, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutObject,
+                          art_portable_hl_aput_object,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kJavaObjectTy, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutBoolean,
+                          art_portable_hl_aput_boolean,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutByte,
+                          art_portable_hl_aput_byte,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutChar,
+                          art_portable_hl_aput_char,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutShort,
+                          art_portable_hl_aput_short,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+//----------------------------------------------------------------------------
+// High-level Instance get/put
+//
+// Similar to art_portable_iget/iput_xxx, but the checks have not yet been
+// performed.  OptFlags carries info describing whether the frontend has
+// determined that the null check may be skipped.
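+//
+// For illustration only, a rough sketch of the intended expansion for a
+// resolved, non-volatile field (field_offset and flag names are illustrative):
+//
+//   value = art_portable_hl_iget(optFlags, obj, field_idx);
+//     ==> if (!(optFlags & MIR_IGNORE_NULL_CHECK)) <emit null check on obj>
+//         value = art_portable_iget.fast(field_offset, false /* is_volatile */, obj);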
+//
+// [type] art_portable_hl_iget_[type](int optFlags, JavaObject* obj, uint32_t field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLIGet,
+                          art_portable_hl_iget,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetFloat,
+                          art_portable_hl_iget_float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kFloatTy,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetWide,
+                          art_portable_hl_iget_wide,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetDouble,
+                          art_portable_hl_iget_double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kDoubleTy,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetObject,
+                          art_portable_hl_iget_object,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetBoolean,
+                          art_portable_hl_iget_boolean,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetByte,
+                          art_portable_hl_iget_byte,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetChar,
+                          art_portable_hl_iget_char,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIGetShort,
+                          art_portable_hl_iget_short,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+// void art_portable_hl_iput_[type](int optFlags, [type] value, JavaObject* obj, uint32_t field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLIPut,
+                          art_portable_hl_iput,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutFloat,
+                          art_portable_hl_iput_float,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kFloatTy, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutWide,
+                          art_portable_hl_iput_wide,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt64Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutDouble,
+                          art_portable_hl_iput_double,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kDoubleTy, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutObject,
+                          art_portable_hl_iput_object,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kJavaObjectTy, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutBoolean,
+                          art_portable_hl_iput_boolean,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutByte,
+                          art_portable_hl_iput_byte,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutChar,
+                          art_portable_hl_iput_char,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(HLIPutShort,
+                          art_portable_hl_iput_short,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
+
+//----------------------------------------------------------------------------
+// High-level Invokes (fast-path determination not yet performed)
+//
+// NOTE: We expect these intrinsics to be temporary.  Once calling conventions
+//       are fully merged, the unified frontend will lower to the
+//       InvokeRetxxx() intrinsics in the next section, and these will be
+//       removed.
+//
+// arg0: InvokeType [ignored if FilledNewArray]
+// arg1: method_idx [ignored if FilledNewArray]
+// arg2: optimization_flags (primarily to note whether null checking is needed)
+// [arg3..argN]: actual arguments
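+//
+// For illustration only, a call with the argument layout above (receiver and
+// argument names are illustrative):
+//
+//   ret = art_portable_hl_invoke.i32(kVirtual   /* arg0: InvokeType  */,
+//                                    method_idx /* arg1: method_idx  */,
+//                                    optFlags   /* arg2: opt flags   */,
+//                                    receiver, arg_a, arg_b /* arg3..argN */);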
+//----------------------------------------------------------------------------
+// INVOKE method returns void
+_EVAL_DEF_INTRINSICS_FUNC(HLInvokeVoid,
+                          art_portable_hl_invoke.void,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG1(kVarArgTy))
+
+// INVOKE method returns object
+_EVAL_DEF_INTRINSICS_FUNC(HLInvokeObj,
+                          art_portable_hl_invoke.obj,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kVarArgTy))
+
+// INVOKE method returns int
+_EVAL_DEF_INTRINSICS_FUNC(HLInvokeInt,
+                          art_portable_hl_invoke.i32,
+                          kAttrNone,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kVarArgTy))
+
+// INVOKE method returns float
+_EVAL_DEF_INTRINSICS_FUNC(HLInvokeFloat,
+                          art_portable_hl_invoke.f32,
+                          kAttrNone,
+                          kFloatTy,
+                          _EXPAND_ARG1(kVarArgTy))
+
+// INVOKE method returns long
+_EVAL_DEF_INTRINSICS_FUNC(HLInvokeLong,
+                          art_portable_hl_invoke.i64,
+                          kAttrNone,
+                          kInt64Ty,
+                          _EXPAND_ARG1(kVarArgTy))
+
+// INVOKE method returns double
+_EVAL_DEF_INTRINSICS_FUNC(HLInvokeDouble,
+                          art_portable_hl_invoke.f64,
+                          kAttrNone,
+                          kDoubleTy,
+                          _EXPAND_ARG1(kVarArgTy))
+
+// FILLED_NEW_ARRAY returns object
+_EVAL_DEF_INTRINSICS_FUNC(HLFilledNewArray,
+                          art_portable_hl_filled_new_array,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kVarArgTy))
+
+//----------------------------------------------------------------------------
+// Invoke
+//----------------------------------------------------------------------------
+
+// Method* art_portable_find_static_method_with_access_check(uint32_t method_idx,
+//                                                       JavaObject* this,
+//                                                       Method* referrer,
+//                                                       Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(FindStaticMethodWithAccessCheck,
+                          art_portable_find_static_method_with_access_check,
+                          kAttrNone,
+                          kJavaMethodTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
+
+// Method* art_portable_find_direct_method_with_access_check(uint32_t method_idx,
+//                                                       JavaObject* this,
+//                                                       Method* referrer,
+//                                                       Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(FindDirectMethodWithAccessCheck,
+                          art_portable_find_direct_method_with_access_check,
+                          kAttrNone,
+                          kJavaMethodTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
+
+// Method* art_portable_find_virtual_method_with_access_check(uint32_t method_idx,
+//                                                        JavaObject* this,
+//                                                        Method* referrer,
+//                                                        Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(FindVirtualMethodWithAccessCheck,
+                          art_portable_find_virtual_method_with_access_check,
+                          kAttrNone,
+                          kJavaMethodTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
+
+// Method* art_portable_find_super_method_with_access_check(uint32_t method_idx,
+//                                                      JavaObject* this,
+//                                                      Method* referrer,
+//                                                      Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(FindSuperMethodWithAccessCheck,
+                          art_portable_find_super_method_with_access_check,
+                          kAttrNone,
+                          kJavaMethodTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
+
+// Method* art_portable_find_interface_method_with_access_check(uint32_t method_idx,
+//                                                          JavaObject* this,
+//                                                          Method* referrer,
+//                                                          Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(FindInterfaceMethodWithAccessCheck,
+                          art_portable_find_interface_method_with_access_check,
+                          kAttrNone,
+                          kJavaMethodTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
+
+// Method* art_portable_get_sd_callee_method_obj_addr(uint32_t method_idx)
+_EVAL_DEF_INTRINSICS_FUNC(GetSDCalleeMethodObjAddrFast,
+                          art_portable_get_sd_callee_method_obj_addr_fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaMethodTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// Method* art_portable_get_virtual_callee_method_obj_addr(uint32_t vtable_idx,
+//                                                     JavaObject* this)
+_EVAL_DEF_INTRINSICS_FUNC(GetVirtualCalleeMethodObjAddrFast,
+                          art_portable_get_virtual_callee_method_obj_addr_fast,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaMethodTy,
+                          _EXPAND_ARG2(kInt32ConstantTy, kJavaObjectTy))
+
+// Method* art_portable_get_interface_callee_method_obj_addr(uint32_t method_idx,
+//                                                       JavaObject* this,
+//                                                       Method* referrer,
+//                                                       Thread* thread)
+_EVAL_DEF_INTRINSICS_FUNC(GetInterfaceCalleeMethodObjAddrFast,
+                          art_portable_get_interface_callee_method_obj_addr_fast,
+                          kAttrNone,
+                          kJavaMethodTy,
+                          _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
+
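+// For illustration only, a rough sketch of how the InvokeRet* intrinsics below
+// pair with the method lookup intrinsics above (argument names illustrative):
+//
+//   Method* callee = art_portable_find_virtual_method_with_access_check(
+//                        method_idx, receiver, referrer, thread);
+//   ret = art_portable_invoke.int(callee, receiver, arg_a, arg_b);
+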
+// [type] art_portable_invoke.[type](Method* callee, ...)
+// INVOKE method returns void
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetVoid,
+                          art_portable_invoke.void,
+                          kAttrNone,
+                          kVoidTy,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type boolean
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetBoolean,
+                          art_portable_invoke.bool,
+                          kAttrNone,
+                          kInt1Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type byte
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetByte,
+                          art_portable_invoke.byte,
+                          kAttrNone,
+                          kInt8Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type char
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetChar,
+                          art_portable_invoke.char,
+                          kAttrNone,
+                          kInt16Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type short
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetShort,
+                          art_portable_invoke.short,
+                          kAttrNone,
+                          kInt16Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type int
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetInt,
+                          art_portable_invoke.int,
+                          kAttrNone,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type long
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetLong,
+                          art_portable_invoke.long,
+                          kAttrNone,
+                          kInt64Ty,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type float
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetFloat,
+                          art_portable_invoke.float,
+                          kAttrNone,
+                          kFloatTy,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type double
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetDouble,
+                          art_portable_invoke.double,
+                          kAttrNone,
+                          kDoubleTy,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+// INVOKE method returns the value of type "object"
+_EVAL_DEF_INTRINSICS_FUNC(InvokeRetObject,
+                          art_portable_invoke.object,
+                          kAttrNone,
+                          kJavaObjectTy,
+                          _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
+
+//----------------------------------------------------------------------------
+// Math
+//----------------------------------------------------------------------------
+
+// int art_portable_{div,rem}_int(int a, int b)
+_EVAL_DEF_INTRINSICS_FUNC(DivInt,
+                          art_portable_div_int,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(RemInt,
+                          art_portable_rem_int,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+// long art_portable_{div,rem}_long(long a, long b)
+_EVAL_DEF_INTRINSICS_FUNC(DivLong,
+                          art_portable_div_long,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG2(kInt64Ty, kInt64Ty))
+
+_EVAL_DEF_INTRINSICS_FUNC(RemLong,
+                          art_portable_rem_long,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG2(kInt64Ty, kInt64Ty))
+
+// int64_t art_portable_d2l(double f)
+_EVAL_DEF_INTRINSICS_FUNC(D2L,
+                          art_portable_d2l,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG1(kDoubleTy))
+
+// int32_t art_portable_d2i(double f)
+_EVAL_DEF_INTRINSICS_FUNC(D2I,
+                          art_portable_d2i,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kDoubleTy))
+
+// int64_t art_portable_f2l(float f)
+_EVAL_DEF_INTRINSICS_FUNC(F2L,
+                          art_portable_f2l,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG1(kFloatTy))
+
+// int32_t art_portable_f2i(float f)
+_EVAL_DEF_INTRINSICS_FUNC(F2I,
+                          art_portable_f2i,
+                          kAttrReadNone | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kFloatTy))
+
+//----------------------------------------------------------------------------
+// sput intrinsics to assist the MIR to Greenland IR conversion.
+// "HL" versions - will be deprecated once fast/slow path handling is done
+// in the common frontend.
+//----------------------------------------------------------------------------
+
+// void sput_hl(int field_idx, int val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSput,
+                          art_portable_hl_sput,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+// void sput_hl_object(int field_idx, object* val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputObject,
+                          art_portable_hl_sput_object,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
+
+// void sput_hl_boolean(int field_idx, int val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputBoolean,
+                          art_portable_hl_sput_boolean,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+// void sput_hl_byte(int field_idx, int val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputByte,
+                          art_portable_hl_sput_byte,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+// void sput_hl_char(int field_idx, int val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputChar,
+                          art_portable_hl_sput_char,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+// void sput_hl_short(int field_idx, int val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputShort,
+                          art_portable_hl_sput_short,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt32Ty))
+
+// void sput_hl_wide(int field_idx, long val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputWide,
+                          art_portable_hl_sput_wide,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kInt64Ty))
+
+// void sput_hl_double(int field_idx, double val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputDouble,
+                          art_portable_hl_sput_double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kDoubleTy))
+
+// void sput_hl_float(int field_idx, float val)
+_EVAL_DEF_INTRINSICS_FUNC(HLSputFloat,
+                          art_portable_hl_sput_float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kFloatTy))
+
+//----------------------------------------------------------------------------
+// sget intrinsics to assist MIR to Greenland_ir conversion.
+// "HL" versions - will be deprecated when fast/slow path handling done
+// in the common frontend.
+//----------------------------------------------------------------------------
+
+// int sget_hl(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSget,
+                          art_portable_hl_sget,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// object* sget_hl_object(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetObject,
+                          art_portable_hl_sget_object,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// boolean sget_hl_boolean(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetBoolean,
+                          art_portable_hl_sget_boolean,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// byte sget_hl_byte(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetByte,
+                          art_portable_hl_sget_byte,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// char sget_hl_char(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetChar,
+                          art_portable_hl_sget_char,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// short sget_hl_short(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetShort,
+                          art_portable_hl_sget_short,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// long sget_hl_wide(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetWide,
+                          art_portable_hl_sget_wide,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// double sget_hl_double(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetDouble,
+                          art_portable_hl_sget_double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kDoubleTy,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// float sget_hl_float(int field_idx)
+_EVAL_DEF_INTRINSICS_FUNC(HLSgetFloat,
+                          art_portable_hl_sget_float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kFloatTy,
+                          _EXPAND_ARG1(kInt32Ty))
+//----------------------------------------------------------------------------
+// Monitor enter/exit
+//----------------------------------------------------------------------------
+// void art_portable_monitor_enter(int optFlags, JavaObject* obj)
+_EVAL_DEF_INTRINSICS_FUNC(MonitorEnter,
+                          art_portable_monitor_enter,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
+
+// void art_portable_monitor_exit(int optFlags, JavaObject* obj)
+_EVAL_DEF_INTRINSICS_FUNC(MonitorExit,
+                          art_portable_monitor_exit,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
+
+//----------------------------------------------------------------------------
+// Shadow Frame
+//----------------------------------------------------------------------------
+
+// void art_portable_alloca_shadow_frame(int num_entry)
+_EVAL_DEF_INTRINSICS_FUNC(AllocaShadowFrame,
+                          art_portable_alloca_shadow_frame,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+// void art_portable_set_vreg(int entry_idx, ...)
+_EVAL_DEF_INTRINSICS_FUNC(SetVReg,
+                          art_portable_set_vreg,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG2(kInt32ConstantTy, kVarArgTy))
+
+// void art_portable_pop_shadow_frame()
+_EVAL_DEF_INTRINSICS_FUNC(PopShadowFrame,
+                          art_portable_pop_shadow_frame,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG0())
+
+// void art_portable_update_dex_pc(uint32_t dex_pc)
+_EVAL_DEF_INTRINSICS_FUNC(UpdateDexPC,
+                          art_portable_update_dex_pc,
+                          kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG1(kInt32ConstantTy))
+
+//----------------------------------------------------------------------------
+// FP Comparison
+//----------------------------------------------------------------------------
+// int cmpl_float(float, float)
+_EVAL_DEF_INTRINSICS_FUNC(CmplFloat,
+                          art_portable_cmpl_float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kFloatTy, kFloatTy))
+
+// int cmpg_float(float, float)
+_EVAL_DEF_INTRINSICS_FUNC(CmpgFloat,
+                          art_portable_cmpg_float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kFloatTy, kFloatTy))
+
+// int cmpl_double(double, double)
+_EVAL_DEF_INTRINSICS_FUNC(CmplDouble,
+                          art_portable_cmpl_double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kDoubleTy, kDoubleTy))
+
+// int cmpg_double(double, double)
+_EVAL_DEF_INTRINSICS_FUNC(CmpgDouble,
+                          art_portable_cmpg_double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kDoubleTy, kDoubleTy))
+
+//----------------------------------------------------------------------------
+// Long Comparison
+//----------------------------------------------------------------------------
+// int cmp_long(long, long)
+_EVAL_DEF_INTRINSICS_FUNC(CmpLong,
+                          art_portable_cmp_long,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt64Ty, kInt64Ty))
+
+//----------------------------------------------------------------------------
+// Const intrinsics to assist MIR to Greenland_ir conversion; these should not
+// materialize.  For simplicity, all take integer input.
+//----------------------------------------------------------------------------
+// int const_int(int)
+_EVAL_DEF_INTRINSICS_FUNC(ConstInt,
+                          art_portable_const_int,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// JavaObject* const_obj(int)
+_EVAL_DEF_INTRINSICS_FUNC(ConstObj,
+                          art_portable_const_obj,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// long const_long(long)
+_EVAL_DEF_INTRINSICS_FUNC(ConstLong,
+                          art_portable_const_long,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG1(kInt64Ty))
+
+// float const_float(int)
+_EVAL_DEF_INTRINSICS_FUNC(ConstFloat,
+                          art_portable_const_Float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kFloatTy,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// double const_double(long)
+_EVAL_DEF_INTRINSICS_FUNC(ConstDouble,
+                          art_portable_const_Double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kDoubleTy,
+                          _EXPAND_ARG1(kInt64Ty))
+
+
+//----------------------------------------------------------------------------
+// Copy intrinsics to assist MIR to Greenland_ir conversion; these should not materialize.
+//----------------------------------------------------------------------------
+
+// void method_info(void)
+_EVAL_DEF_INTRINSICS_FUNC(MethodInfo,
+                          art_portable_method_info,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG0())
+
+// int copy_int(int)
+_EVAL_DEF_INTRINSICS_FUNC(CopyInt,
+                          art_portable_copy_int,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// JavaObject* copy_obj(obj)
+_EVAL_DEF_INTRINSICS_FUNC(CopyObj,
+                          art_portable_copy_obj,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kJavaObjectTy,
+                          _EXPAND_ARG1(kJavaObjectTy))
+
+// long copy_long(long)
+_EVAL_DEF_INTRINSICS_FUNC(CopyLong,
+                          art_portable_copy_long,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG1(kInt64Ty))
+
+// float copy_float(float)
+_EVAL_DEF_INTRINSICS_FUNC(CopyFloat,
+                          art_portable_copy_Float,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kFloatTy,
+                          _EXPAND_ARG1(kFloatTy))
+
+// double copy_double(double)
+_EVAL_DEF_INTRINSICS_FUNC(CopyDouble,
+                          art_portable_copy_Double,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kDoubleTy,
+                          _EXPAND_ARG1(kDoubleTy))
+
+//----------------------------------------------------------------------------
+// Shift intrinsics.  Shift semantics for Dalvik are a bit different than
+// the llvm shift operators.  For 32-bit shifts, the shift count is constrained
+// to the range of 0..31, while for 64-bit shifts we limit to 0..63.
+// Further, the shift count for Long shifts in Dalvik is 32 bits, while
+// llvm requires a 64-bit shift count. For GBC, we represent shifts as an
+//  intrinsic to allow most efficient target-dependent lowering.
+//----------------------------------------------------------------------------
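+// For illustration only (a sketch of the intended lowering, not a definition
+// in this file): a backend expanding these intrinsics would mask the Dalvik
+// shift count before emitting the LLVM shift, roughly
+//   shl_long(v, n)  =>  v << (n & 63)   // count widened to 64 bits first
+//   shl_int(v, n)   =>  v << (n & 31)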
+// long shl_long(long,int)
+_EVAL_DEF_INTRINSICS_FUNC(SHLLong,
+                          art_portable_shl_long,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG2(kInt64Ty,kInt32Ty))
+// long shr_long(long,int)
+_EVAL_DEF_INTRINSICS_FUNC(SHRLong,
+                          art_portable_shr_long,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG2(kInt64Ty,kInt32Ty))
+// long ushr_long(long,int)
+_EVAL_DEF_INTRINSICS_FUNC(USHRLong,
+                          art_portable_ushl_long,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt64Ty,
+                          _EXPAND_ARG2(kInt64Ty,kInt32Ty))
+// int shl_int(int,int)
+_EVAL_DEF_INTRINSICS_FUNC(SHLInt,
+                          art_portable_shl_int,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty,kInt32Ty))
+// int shr_int(int,int)
+_EVAL_DEF_INTRINSICS_FUNC(SHRInt,
+                          art_portable_shr_int,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty,kInt32Ty))
+// int ushr_int(int,int)
+_EVAL_DEF_INTRINSICS_FUNC(USHRInt,
+                          art_portable_ushl_int,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG2(kInt32Ty,kInt32Ty))
+//----------------------------------------------------------------------------
+// Conversion intrinsics.  Note: these should eventually be removed.  We
+// can express these directly in bitcode, but by using intrinsics the
+// Quick compiler can be more efficient.  Some extra optimization infrastructure
+// will have to be developed to undo the bitcode verbosity when these are
+// done inline.
+//----------------------------------------------------------------------------
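+// For reference (a sketch of the bitcode these intrinsics stand in for, not
+// part of this change): int_to_byte(x) truncates x to 8 bits and sign-extends
+// back to i32, i.e. (int32_t)(int8_t)x in C terms; int_to_char zero-extends
+// from 16 bits and int_to_short sign-extends from 16 bits.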
+// int int_to_byte(int)
+_EVAL_DEF_INTRINSICS_FUNC(IntToByte,
+                          art_portable_int_to_byte,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// int int_to_char(int)
+_EVAL_DEF_INTRINSICS_FUNC(IntToChar,
+                          art_portable_int_to_char,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+// int int_to_short(int)
+_EVAL_DEF_INTRINSICS_FUNC(IntToShort,
+                          art_portable_int_to_short,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kInt32Ty,
+                          _EXPAND_ARG1(kInt32Ty))
+
+//----------------------------------------------------------------------------
+// Memory barrier
+//----------------------------------------------------------------------------
+// void constructor_barrier()
+_EVAL_DEF_INTRINSICS_FUNC(ConstructorBarrier,
+                          art_portable_constructor_barrier,
+                          kAttrReadOnly | kAttrNoThrow,
+                          kVoidTy,
+                          _EXPAND_ARG0())
+
+// Clean up all internal used macros
+#undef _EXPAND_ARG0
+#undef _EXPAND_ARG1
+#undef _EXPAND_ARG2
+#undef _EXPAND_ARG3
+#undef _EXPAND_ARG4
+#undef _EXPAND_ARG5
+
+#undef _JTYPE_OF_kInt1Ty_UNDER_kArray
+#undef _JTYPE_OF_kInt8Ty_UNDER_kArray
+#undef _JTYPE_OF_kInt16Ty_UNDER_kArray
+#undef _JTYPE_OF_kInt32Ty_UNDER_kArray
+#undef _JTYPE_OF_kInt64Ty_UNDER_kArray
+#undef _JTYPE_OF_kJavaObjectTy_UNDER_kArray
+
+#undef _JTYPE_OF_kInt1Ty_UNDER_kField
+#undef _JTYPE_OF_kInt8Ty_UNDER_kField
+#undef _JTYPE_OF_kInt16Ty_UNDER_kField
+#undef _JTYPE_OF_kInt32Ty_UNDER_kField
+#undef _JTYPE_OF_kInt64Ty_UNDER_kField
+#undef _JTYPE_OF_kJavaObjectTy_UNDER_kField
+
+#undef DEF_INTRINSICS_FUNC
diff --git a/compiler/llvm/intrinsic_helper.cc b/compiler/llvm/intrinsic_helper.cc
new file mode 100644
index 0000000..a34cb33
--- /dev/null
+++ b/compiler/llvm/intrinsic_helper.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsic_helper.h"
+
+#include "ir_builder.h"
+
+#include <llvm/IR/Attributes.h>
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/IRBuilder.h>
+#include <llvm/IR/Intrinsics.h>
+
+namespace art {
+namespace llvm {
+
+const IntrinsicHelper::IntrinsicInfo IntrinsicHelper::Info[] = {
+#define DEF_INTRINSICS_FUNC(_, NAME, ATTR, RET_TYPE, ARG1_TYPE, ARG2_TYPE, \
+                                                     ARG3_TYPE, ARG4_TYPE, \
+                                                     ARG5_TYPE) \
+  { #NAME, ATTR, RET_TYPE, { ARG1_TYPE, ARG2_TYPE, \
+                             ARG3_TYPE, ARG4_TYPE, \
+                             ARG5_TYPE} },
+#include "intrinsic_func_list.def"
+};
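+// For example, the DivInt entry in intrinsic_func_list.def expands (roughly,
+// assuming the _EXPAND_ARGn macros pad the unused argument slots with kNone)
+// to:
+//   { "art_portable_div_int", kAttrReadNone | kAttrNoThrow, kInt32Ty,
+//     { kInt32Ty, kInt32Ty, kNone, kNone, kNone } },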
+
+static ::llvm::Type* GetLLVMTypeOfIntrinsicValType(IRBuilder& irb,
+                                                   IntrinsicHelper::IntrinsicValType type) {
+  switch (type) {
+    case IntrinsicHelper::kVoidTy: {
+      return irb.getVoidTy();
+    }
+    case IntrinsicHelper::kJavaObjectTy: {
+      return irb.getJObjectTy();
+    }
+    case IntrinsicHelper::kJavaMethodTy: {
+      return irb.getJMethodTy();
+    }
+    case IntrinsicHelper::kJavaThreadTy: {
+      return irb.getJThreadTy();
+    }
+    case IntrinsicHelper::kInt1Ty:
+    case IntrinsicHelper::kInt1ConstantTy: {
+      return irb.getInt1Ty();
+    }
+    case IntrinsicHelper::kInt8Ty:
+    case IntrinsicHelper::kInt8ConstantTy: {
+      return irb.getInt8Ty();
+    }
+    case IntrinsicHelper::kInt16Ty:
+    case IntrinsicHelper::kInt16ConstantTy: {
+      return irb.getInt16Ty();
+    }
+    case IntrinsicHelper::kInt32Ty:
+    case IntrinsicHelper::kInt32ConstantTy: {
+      return irb.getInt32Ty();
+    }
+    case IntrinsicHelper::kInt64Ty:
+    case IntrinsicHelper::kInt64ConstantTy: {
+      return irb.getInt64Ty();
+    }
+    case IntrinsicHelper::kFloatTy:
+    case IntrinsicHelper::kFloatConstantTy: {
+      return irb.getFloatTy();
+    }
+    case IntrinsicHelper::kDoubleTy:
+    case IntrinsicHelper::kDoubleConstantTy: {
+      return irb.getDoubleTy();
+    }
+    case IntrinsicHelper::kNone:
+    case IntrinsicHelper::kVarArgTy:
+    default: {
+      LOG(FATAL) << "Invalid intrinsic type " << type << " to get LLVM type!";
+      return NULL;
+    }
+  }
+  // unreachable
+}
+
+IntrinsicHelper::IntrinsicHelper(::llvm::LLVMContext& context,
+                                 ::llvm::Module& module) {
+  IRBuilder irb(context, module, *this);
+
+  ::memset(intrinsic_funcs_, 0, sizeof(intrinsic_funcs_));
+
+  // This loop does the following things:
+  // 1. Introduce the intrinsic function into the module
+  // 2. Add the "nocapture" and "noalias" attributes to the pointer arguments
+  //    of all intrinsic functions.
+  // 3. Initialize intrinsic_funcs_map_.
+  for (unsigned i = 0; i < MaxIntrinsicId; i++) {
+    IntrinsicId id = static_cast<IntrinsicId>(i);
+    const IntrinsicInfo& info = Info[i];
+
+    // Parse and construct the argument type from IntrinsicInfo
+    ::llvm::Type* arg_type[kIntrinsicMaxArgc];
+    unsigned num_args = 0;
+    bool is_var_arg = false;
+    for (unsigned arg_iter = 0; arg_iter < kIntrinsicMaxArgc; arg_iter++) {
+      IntrinsicValType type = info.arg_type_[arg_iter];
+
+      if (type == kNone) {
+        break;
+      } else if (type == kVarArgTy) {
+        // Variable argument type must be the last argument
+        is_var_arg = true;
+        break;
+      }
+
+      arg_type[num_args++] = GetLLVMTypeOfIntrinsicValType(irb, type);
+    }
+
+    // Construct the function type
+    ::llvm::Type* ret_type =
+        GetLLVMTypeOfIntrinsicValType(irb, info.ret_val_type_);
+
+    ::llvm::FunctionType* type =
+        ::llvm::FunctionType::get(ret_type,
+                                  ::llvm::ArrayRef< ::llvm::Type*>(arg_type, num_args),
+                                  is_var_arg);
+
+    // Declare the function
+    ::llvm::Function *fn = ::llvm::Function::Create(type,
+                                                    ::llvm::Function::ExternalLinkage,
+                                                     info.name_, &module);
+
+    if (info.attr_ & kAttrReadOnly) {
+      fn->setOnlyReadsMemory();
+    }
+    if (info.attr_ & kAttrReadNone) {
+      fn->setDoesNotAccessMemory();
+    }
+    // None of the intrinsics throws exception
+    fn->setDoesNotThrow();
+
+    intrinsic_funcs_[id] = fn;
+
+    DCHECK_NE(fn, static_cast< ::llvm::Function*>(NULL)) << "Intrinsic `"
+        << GetName(id) << "' was not defined!";
+
+    // Add "noalias" and "nocapture" attribute to all arguments of pointer type
+    for (::llvm::Function::arg_iterator arg_iter = fn->arg_begin(),
+            arg_end = fn->arg_end(); arg_iter != arg_end; arg_iter++) {
+      if (arg_iter->getType()->isPointerTy()) {
+        std::vector< ::llvm::Attribute::AttrKind> attributes;
+        attributes.push_back(::llvm::Attribute::NoCapture);
+        attributes.push_back(::llvm::Attribute::NoAlias);
+        ::llvm::AttributeSet attribute_set = ::llvm::AttributeSet::get(fn->getContext(),
+                                                                       arg_iter->getArgNo(),
+                                                                       attributes);
+        arg_iter->addAttr(attribute_set);
+      }
+    }
+
+    // Insert the newly created intrinsic to intrinsic_funcs_map_
+    if (!intrinsic_funcs_map_.insert(std::make_pair(fn, id)).second) {
+      LOG(FATAL) << "Duplicate entry in intrinsic functions map?";
+    }
+  }
+
+  return;
+}
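+// Typical usage (a sketch; actual callers live elsewhere in the compiler):
+//   IntrinsicHelper helper(context, module);
+//   ::llvm::Function* div_int = helper.GetIntrinsicFunction(IntrinsicHelper::DivInt);
+//   // div_int is the declaration of art_portable_div_int added to `module`.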
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/intrinsic_helper.h b/compiler/llvm/intrinsic_helper.h
new file mode 100644
index 0000000..49b8a95
--- /dev/null
+++ b/compiler/llvm/intrinsic_helper.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GREENLAND_INTRINSIC_HELPER_H_
+#define ART_SRC_GREENLAND_INTRINSIC_HELPER_H_
+
+#include "base/logging.h"
+
+#include <llvm/ADT/DenseMap.h>
+
+namespace llvm {
+  class Function;
+  class FunctionType;
+  class LLVMContext;
+  class Module;
+}  // namespace llvm
+
+namespace art {
+namespace llvm {
+
+class IRBuilder;
+
+class IntrinsicHelper {
+ public:
+  enum IntrinsicId {
+#define DEF_INTRINSICS_FUNC(ID, ...) ID,
+#include "intrinsic_func_list.def"
+    MaxIntrinsicId,
+
+    // Pseudo-intrinsics Id
+    UnknownId
+  };
+
+  enum IntrinsicAttribute {
+    kAttrNone     = 0,
+
+    // Intrinsic that neither modifies the memory state nor refers to the
+    // global state
+    kAttrReadNone = 1 << 0,
+
+    // Intrinsic that doesn't modify the memory state. Note that this flag
+    // should be set carefully when the intrinsic may throw an exception, since
+    // the thread state is implicitly modified when an exception is thrown.
+    kAttrReadOnly = 1 << 1,
+
+    // Note that an intrinsic with neither kAttrNoThrow nor kAttrDoThrow set
+    // may generate an exception in some cases
+
+    // Intrinsic that never generates an exception
+    kAttrNoThrow  = 1 << 2,
+    // Intrinsic that always generates an exception
+    kAttrDoThrow  = 1 << 3,
+  };
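+  // For example, a pure arithmetic intrinsic such as DivInt is declared with
+  // (kAttrReadNone | kAttrNoThrow) in intrinsic_func_list.def, while an
+  // intrinsic that may enter the runtime (e.g. InvokeRetDouble) leaves both
+  // throw bits unset and is treated as potentially throwing.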
+
+  enum IntrinsicValType {
+    kNone,
+
+    kVoidTy,
+
+    kJavaObjectTy,
+    kJavaMethodTy,
+    kJavaThreadTy,
+
+    kInt1Ty,
+    kInt8Ty,
+    kInt16Ty,
+    kInt32Ty,
+    kInt64Ty,
+    kFloatTy,
+    kDoubleTy,
+
+    kInt1ConstantTy,
+    kInt8ConstantTy,
+    kInt16ConstantTy,
+    kInt32ConstantTy,
+    kInt64ConstantTy,
+    kFloatConstantTy,
+    kDoubleConstantTy,
+
+    kVarArgTy,
+  };
+
+  enum {
+    kIntrinsicMaxArgc = 5
+  };
+
+  typedef struct IntrinsicInfo {
+    const char* name_;
+    unsigned attr_;
+    IntrinsicValType ret_val_type_;
+    IntrinsicValType arg_type_[kIntrinsicMaxArgc];
+  } IntrinsicInfo;
+
+ private:
+  static const IntrinsicInfo Info[];
+
+ public:
+  static const IntrinsicInfo& GetInfo(IntrinsicId id) {
+    DCHECK(id >= 0 && id < MaxIntrinsicId) << "Unknown ART intrinsics ID: "
+                                           << id;
+    return Info[id];
+  }
+
+  static const char* GetName(IntrinsicId id) {
+    return (id < MaxIntrinsicId) ? GetInfo(id).name_ : "InvalidIntrinsic";
+  }
+
+  static unsigned GetAttr(IntrinsicId id) {
+    return GetInfo(id).attr_;
+  }
+
+ public:
+  IntrinsicHelper(::llvm::LLVMContext& context, ::llvm::Module& module);
+
+  ::llvm::Function* GetIntrinsicFunction(IntrinsicId id) {
+    DCHECK(id >= 0 && id < MaxIntrinsicId) << "Unknown ART intrinsics ID: "
+                                           << id;
+    return intrinsic_funcs_[id];
+  }
+
+  IntrinsicId GetIntrinsicId(const ::llvm::Function* func) const {
+    ::llvm::DenseMap<const ::llvm::Function*, IntrinsicId>::const_iterator
+        i = intrinsic_funcs_map_.find(func);
+    if (i == intrinsic_funcs_map_.end()) {
+      return UnknownId;
+    } else {
+      return i->second;
+    }
+  }
+
+ private:
+  // FIXME: The "+1" is a workaround for a GCC bug:
+  // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  // Remove this once a newer GCC (> 4.4.3) is used.
+  ::llvm::Function* intrinsic_funcs_[MaxIntrinsicId + 1];
+
+  // Map a llvm::Function to its intrinsic id
+  ::llvm::DenseMap<const ::llvm::Function*, IntrinsicId> intrinsic_funcs_map_;
+};
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_GREENLAND_INTRINSIC_HELPER_H_
diff --git a/compiler/llvm/ir_builder.cc b/compiler/llvm/ir_builder.cc
new file mode 100644
index 0000000..a65cf2b
--- /dev/null
+++ b/compiler/llvm/ir_builder.cc
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir_builder.h"
+
+#include "base/stringprintf.h"
+
+#include <llvm/IR/Module.h>
+
+namespace art {
+namespace llvm {
+
+
+//----------------------------------------------------------------------------
+// General
+//----------------------------------------------------------------------------
+
+IRBuilder::IRBuilder(::llvm::LLVMContext& context, ::llvm::Module& module,
+                     IntrinsicHelper& intrinsic_helper)
+    : LLVMIRBuilder(context), module_(&module), mdb_(context), java_object_type_(NULL),
+      java_method_type_(NULL), java_thread_type_(NULL), intrinsic_helper_(intrinsic_helper) {
+  // Get java object type from module
+  ::llvm::Type* jobject_struct_type = module.getTypeByName("JavaObject");
+  CHECK(jobject_struct_type != NULL);
+  java_object_type_ = jobject_struct_type->getPointerTo();
+
+  // If type of Method is not explicitly defined in the module, use JavaObject*
+  ::llvm::Type* type = module.getTypeByName("Method");
+  if (type != NULL) {
+    java_method_type_ = type->getPointerTo();
+  } else {
+    java_method_type_ = java_object_type_;
+  }
+
+  // If type of Thread is not explicitly defined in the module, use JavaObject*
+  type = module.getTypeByName("Thread");
+  if (type != NULL) {
+    java_thread_type_ = type->getPointerTo();
+  } else {
+    java_thread_type_ = java_object_type_;
+  }
+
+  // Create JEnv* type
+  ::llvm::Type* jenv_struct_type = ::llvm::StructType::create(context, "JEnv");
+  jenv_type_ = jenv_struct_type->getPointerTo();
+
+  // Get Art shadow frame struct type from module
+  art_frame_type_ = module.getTypeByName("ShadowFrame");
+  CHECK(art_frame_type_ != NULL);
+
+  runtime_support_ = NULL;
+}
+
+
+//----------------------------------------------------------------------------
+// Type Helper Function
+//----------------------------------------------------------------------------
+
+::llvm::Type* IRBuilder::getJType(JType jty) {
+  switch (jty) {
+  case kVoid:
+    return getJVoidTy();
+
+  case kBoolean:
+    return getJBooleanTy();
+
+  case kByte:
+    return getJByteTy();
+
+  case kChar:
+    return getJCharTy();
+
+  case kShort:
+    return getJShortTy();
+
+  case kInt:
+    return getJIntTy();
+
+  case kLong:
+    return getJLongTy();
+
+  case kFloat:
+    return getJFloatTy();
+
+  case kDouble:
+    return getJDoubleTy();
+
+  case kObject:
+    return getJObjectTy();
+
+  default:
+    LOG(FATAL) << "Unknown java type: " << jty;
+    return NULL;
+  }
+}
+
+::llvm::StructType* IRBuilder::getShadowFrameTy(uint32_t vreg_size) {
+  std::string name(StringPrintf("ShadowFrame%u", vreg_size));
+
+  // Try to find the existing struct type definition
+  if (::llvm::Type* type = module_->getTypeByName(name)) {
+    CHECK(::llvm::isa< ::llvm::StructType>(type));
+    return static_cast< ::llvm::StructType*>(type);
+  }
+
+  // Create new struct type definition
+  ::llvm::Type* elem_types[] = {
+    art_frame_type_,
+    ::llvm::ArrayType::get(getInt32Ty(), vreg_size),
+  };
+
+  return ::llvm::StructType::create(elem_types, name);
+}
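+// For example (a sketch): getShadowFrameTy(4) returns a struct type named
+// "ShadowFrame4" laid out as { ShadowFrame, [4 x i32] }, creating it on first
+// use and reusing the existing definition on later calls.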
+
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h
new file mode 100644
index 0000000..734b22f
--- /dev/null
+++ b/compiler/llvm/ir_builder.h
@@ -0,0 +1,490 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_IR_BUILDER_H_
+#define ART_SRC_COMPILER_LLVM_IR_BUILDER_H_
+
+#include "backend_types.h"
+#include "dex/compiler_enums.h"
+#include "intrinsic_helper.h"
+#include "md_builder.h"
+#include "runtime_support_builder.h"
+#include "runtime_support_llvm_func.h"
+
+#include <llvm/IR/Constants.h>
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/IRBuilder.h>
+#include <llvm/IR/LLVMContext.h>
+#include <llvm/IR/Type.h>
+#include <llvm/Support/NoFolder.h>
+
+#include <stdint.h>
+
+
+namespace art {
+namespace llvm {
+
+class InserterWithDexOffset : public ::llvm::IRBuilderDefaultInserter<true> {
+  public:
+    InserterWithDexOffset() : node_(NULL) {}
+
+    void InsertHelper(::llvm::Instruction *I, const ::llvm::Twine &Name,
+                      ::llvm::BasicBlock *BB,
+                      ::llvm::BasicBlock::iterator InsertPt) const {
+      ::llvm::IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
+      if (node_ != NULL) {
+        I->setMetadata("DexOff", node_);
+      }
+    }
+
+    void SetDexOffset(::llvm::MDNode* node) {
+      node_ = node;
+    }
+  private:
+    ::llvm::MDNode* node_;
+};
+
+typedef ::llvm::IRBuilder<true, ::llvm::ConstantFolder, InserterWithDexOffset> LLVMIRBuilder;
+// NOTE: Here we define our own LLVMIRBuilder type alias, so that we can
+// switch the "preserveNames" template parameter easily.
+
+
+class IRBuilder : public LLVMIRBuilder {
+ public:
+  //--------------------------------------------------------------------------
+  // General
+  //--------------------------------------------------------------------------
+
+  IRBuilder(::llvm::LLVMContext& context, ::llvm::Module& module,
+            IntrinsicHelper& intrinsic_helper);
+
+
+  //--------------------------------------------------------------------------
+  // Extend load & store for TBAA
+  //--------------------------------------------------------------------------
+
+  ::llvm::LoadInst* CreateLoad(::llvm::Value* ptr, ::llvm::MDNode* tbaa_info) {
+    ::llvm::LoadInst* inst = LLVMIRBuilder::CreateLoad(ptr);
+    inst->setMetadata(::llvm::LLVMContext::MD_tbaa, tbaa_info);
+    return inst;
+  }
+
+  ::llvm::StoreInst* CreateStore(::llvm::Value* val, ::llvm::Value* ptr, ::llvm::MDNode* tbaa_info) {
+    ::llvm::StoreInst* inst = LLVMIRBuilder::CreateStore(val, ptr);
+    inst->setMetadata(::llvm::LLVMContext::MD_tbaa, tbaa_info);
+    return inst;
+  }
+
+  ::llvm::AtomicCmpXchgInst*
+  CreateAtomicCmpXchgInst(::llvm::Value* ptr, ::llvm::Value* cmp, ::llvm::Value* val,
+                          ::llvm::MDNode* tbaa_info) {
+    ::llvm::AtomicCmpXchgInst* inst =
+        LLVMIRBuilder::CreateAtomicCmpXchg(ptr, cmp, val, ::llvm::Acquire);
+    inst->setMetadata(::llvm::LLVMContext::MD_tbaa, tbaa_info);
+    return inst;
+  }
+
+  //--------------------------------------------------------------------------
+  // Extend memory barrier
+  //--------------------------------------------------------------------------
+  void CreateMemoryBarrier(MemBarrierKind barrier_kind) {
+#if ANDROID_SMP
+    // TODO: select atomic ordering according to given barrier kind.
+    CreateFence(::llvm::SequentiallyConsistent);
+#endif
+  }
+
+  //--------------------------------------------------------------------------
+  // TBAA
+  //--------------------------------------------------------------------------
+
+  // TODO: After we design the non-special TBAA info, re-design the TBAA interface.
+  ::llvm::LoadInst* CreateLoad(::llvm::Value* ptr, TBAASpecialType special_ty) {
+    return CreateLoad(ptr, mdb_.GetTBAASpecialType(special_ty));
+  }
+
+  ::llvm::StoreInst* CreateStore(::llvm::Value* val, ::llvm::Value* ptr, TBAASpecialType special_ty) {
+    DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
+    return CreateStore(val, ptr, mdb_.GetTBAASpecialType(special_ty));
+  }
+
+  ::llvm::LoadInst* CreateLoad(::llvm::Value* ptr, TBAASpecialType special_ty, JType j_ty) {
+    return CreateLoad(ptr, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
+  }
+
+  ::llvm::StoreInst* CreateStore(::llvm::Value* val, ::llvm::Value* ptr,
+                               TBAASpecialType special_ty, JType j_ty) {
+    DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
+    return CreateStore(val, ptr, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
+  }
+
+  ::llvm::LoadInst* LoadFromObjectOffset(::llvm::Value* object_addr,
+                                       int64_t offset,
+                                       ::llvm::Type* type,
+                                       TBAASpecialType special_ty) {
+    return LoadFromObjectOffset(object_addr, offset, type, mdb_.GetTBAASpecialType(special_ty));
+  }
+
+  void StoreToObjectOffset(::llvm::Value* object_addr,
+                           int64_t offset,
+                           ::llvm::Value* new_value,
+                           TBAASpecialType special_ty) {
+    DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
+    StoreToObjectOffset(object_addr, offset, new_value, mdb_.GetTBAASpecialType(special_ty));
+  }
+
+  ::llvm::LoadInst* LoadFromObjectOffset(::llvm::Value* object_addr,
+                                       int64_t offset,
+                                       ::llvm::Type* type,
+                                       TBAASpecialType special_ty, JType j_ty) {
+    return LoadFromObjectOffset(object_addr, offset, type, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
+  }
+
+  void StoreToObjectOffset(::llvm::Value* object_addr,
+                           int64_t offset,
+                           ::llvm::Value* new_value,
+                           TBAASpecialType special_ty, JType j_ty) {
+    DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
+    StoreToObjectOffset(object_addr, offset, new_value, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
+  }
+
+  ::llvm::AtomicCmpXchgInst*
+  CompareExchangeObjectOffset(::llvm::Value* object_addr,
+                              int64_t offset,
+                              ::llvm::Value* cmp_value,
+                              ::llvm::Value* new_value,
+                              TBAASpecialType special_ty) {
+    DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
+    return CompareExchangeObjectOffset(object_addr, offset, cmp_value, new_value,
+                                       mdb_.GetTBAASpecialType(special_ty));
+  }
+
+  void SetTBAA(::llvm::Instruction* inst, TBAASpecialType special_ty) {
+    inst->setMetadata(::llvm::LLVMContext::MD_tbaa, mdb_.GetTBAASpecialType(special_ty));
+  }
+
+
+  //--------------------------------------------------------------------------
+  // Static Branch Prediction
+  //--------------------------------------------------------------------------
+
+  // Import the original conditional branch
+  using LLVMIRBuilder::CreateCondBr;
+  ::llvm::BranchInst* CreateCondBr(::llvm::Value *cond,
+                                 ::llvm::BasicBlock* true_bb,
+                                 ::llvm::BasicBlock* false_bb,
+                                 ExpectCond expect) {
+    ::llvm::BranchInst* branch_inst = CreateCondBr(cond, true_bb, false_bb);
+    if (false) {
+      // TODO: http://b/8511695 Restore branch weight metadata
+      branch_inst->setMetadata(::llvm::LLVMContext::MD_prof, mdb_.GetBranchWeights(expect));
+    }
+    return branch_inst;
+  }
+
+
+  //--------------------------------------------------------------------------
+  // Pointer Arithmetic Helper Function
+  //--------------------------------------------------------------------------
+
+  ::llvm::IntegerType* getPtrEquivIntTy() {
+    return getInt32Ty();
+  }
+
+  size_t getSizeOfPtrEquivInt() {
+    return 4;
+  }
+
+  ::llvm::ConstantInt* getSizeOfPtrEquivIntValue() {
+    return getPtrEquivInt(getSizeOfPtrEquivInt());
+  }
+
+  ::llvm::ConstantInt* getPtrEquivInt(int64_t i) {
+    return ::llvm::ConstantInt::get(getPtrEquivIntTy(), i);
+  }
+
+  ::llvm::Value* CreatePtrDisp(::llvm::Value* base,
+                             ::llvm::Value* offset,
+                             ::llvm::PointerType* ret_ty) {
+
+    ::llvm::Value* base_int = CreatePtrToInt(base, getPtrEquivIntTy());
+    ::llvm::Value* result_int = CreateAdd(base_int, offset);
+    ::llvm::Value* result = CreateIntToPtr(result_int, ret_ty);
+
+    return result;
+  }
+
+  ::llvm::Value* CreatePtrDisp(::llvm::Value* base,
+                             ::llvm::Value* bs,
+                             ::llvm::Value* count,
+                             ::llvm::Value* offset,
+                             ::llvm::PointerType* ret_ty) {
+
+    ::llvm::Value* block_offset = CreateMul(bs, count);
+    ::llvm::Value* total_offset = CreateAdd(block_offset, offset);
+
+    return CreatePtrDisp(base, total_offset, ret_ty);
+  }
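+  // Example use (a sketch; `index` and `elem_ptr_ty` are hypothetical values
+  // supplied by the caller): computing the address of the index-th 4-byte
+  // element of an array whose data starts `data_offset` bytes into `base`:
+  //   ::llvm::Value* addr = CreatePtrDisp(base, getPtrEquivInt(4), index,
+  //                                       getPtrEquivInt(data_offset), elem_ptr_ty);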
+
+  ::llvm::LoadInst* LoadFromObjectOffset(::llvm::Value* object_addr,
+                                       int64_t offset,
+                                       ::llvm::Type* type,
+                                       ::llvm::MDNode* tbaa_info) {
+    // Convert offset to ::llvm::value
+    ::llvm::Value* llvm_offset = getPtrEquivInt(offset);
+    // Calculate the value's address
+    ::llvm::Value* value_addr = CreatePtrDisp(object_addr, llvm_offset, type->getPointerTo());
+    // Load
+    return CreateLoad(value_addr, tbaa_info);
+  }
+
+  void StoreToObjectOffset(::llvm::Value* object_addr,
+                           int64_t offset,
+                           ::llvm::Value* new_value,
+                           ::llvm::MDNode* tbaa_info) {
+    // Convert offset to ::llvm::value
+    ::llvm::Value* llvm_offset = getPtrEquivInt(offset);
+    // Calculate the value's address
+    ::llvm::Value* value_addr = CreatePtrDisp(object_addr,
+                                            llvm_offset,
+                                            new_value->getType()->getPointerTo());
+    // Store
+    CreateStore(new_value, value_addr, tbaa_info);
+  }
+
+  ::llvm::AtomicCmpXchgInst* CompareExchangeObjectOffset(::llvm::Value* object_addr,
+                                                       int64_t offset,
+                                                       ::llvm::Value* cmp_value,
+                                                       ::llvm::Value* new_value,
+                                                       ::llvm::MDNode* tbaa_info) {
+    // Convert offset to ::llvm::value
+    ::llvm::Value* llvm_offset = getPtrEquivInt(offset);
+    // Calculate the value's address
+    ::llvm::Value* value_addr = CreatePtrDisp(object_addr,
+                                            llvm_offset,
+                                            new_value->getType()->getPointerTo());
+    // Atomic compare and exchange
+    return CreateAtomicCmpXchgInst(value_addr, cmp_value, new_value, tbaa_info);
+  }
+
+
+  //--------------------------------------------------------------------------
+  // Runtime Helper Function
+  //--------------------------------------------------------------------------
+
+  RuntimeSupportBuilder& Runtime() {
+    return *runtime_support_;
+  }
+
+  // TODO: Deprecate
+  ::llvm::Function* GetRuntime(runtime_support::RuntimeId rt) {
+    return runtime_support_->GetRuntimeSupportFunction(rt);
+  }
+
+  // TODO: Deprecate
+  void SetRuntimeSupport(RuntimeSupportBuilder* runtime_support) {
+    // Can only be set once. We can't do this in the constructor, because
+    // RuntimeSupportBuilder itself needs an IRBuilder.
+    if (runtime_support_ == NULL && runtime_support != NULL) {
+      runtime_support_ = runtime_support;
+    }
+  }
+
+
+  //--------------------------------------------------------------------------
+  // Type Helper Function
+  //--------------------------------------------------------------------------
+
+  ::llvm::Type* getJType(char shorty_jty) {
+    return getJType(GetJTypeFromShorty(shorty_jty));
+  }
+
+  ::llvm::Type* getJType(JType jty);
+
+  ::llvm::Type* getJVoidTy() {
+    return getVoidTy();
+  }
+
+  ::llvm::IntegerType* getJBooleanTy() {
+    return getInt8Ty();
+  }
+
+  ::llvm::IntegerType* getJByteTy() {
+    return getInt8Ty();
+  }
+
+  ::llvm::IntegerType* getJCharTy() {
+    return getInt16Ty();
+  }
+
+  ::llvm::IntegerType* getJShortTy() {
+    return getInt16Ty();
+  }
+
+  ::llvm::IntegerType* getJIntTy() {
+    return getInt32Ty();
+  }
+
+  ::llvm::IntegerType* getJLongTy() {
+    return getInt64Ty();
+  }
+
+  ::llvm::Type* getJFloatTy() {
+    return getFloatTy();
+  }
+
+  ::llvm::Type* getJDoubleTy() {
+    return getDoubleTy();
+  }
+
+  ::llvm::PointerType* getJObjectTy() {
+    return java_object_type_;
+  }
+
+  ::llvm::PointerType* getJMethodTy() {
+    return java_method_type_;
+  }
+
+  ::llvm::PointerType* getJThreadTy() {
+    return java_thread_type_;
+  }
+
+  ::llvm::Type* getArtFrameTy() {
+    return art_frame_type_;
+  }
+
+  ::llvm::PointerType* getJEnvTy() {
+    return jenv_type_;
+  }
+
+  ::llvm::Type* getJValueTy() {
+    // NOTE: JValue is a union type that may contain a boolean, byte, char,
+    // short, int, long, float, double, or Object.  However, LLVM itself does
+    // not support union types, so we have to return the type with the biggest
+    // size and bitcast it before use.
+    return getJLongTy();
+  }
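+  // For example (a sketch; `jvalue` is a hypothetical i64 value loaded from a
+  // JValue slot), recovering a float stored in a JValue:
+  //   ::llvm::Value* f = CreateBitCast(CreateTrunc(jvalue, getJIntTy()), getJFloatTy());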
+
+  ::llvm::StructType* getShadowFrameTy(uint32_t vreg_size);
+
+
+  //--------------------------------------------------------------------------
+  // Constant Value Helper Function
+  //--------------------------------------------------------------------------
+
+  ::llvm::ConstantInt* getJBoolean(bool is_true) {
+    return (is_true) ? getTrue() : getFalse();
+  }
+
+  ::llvm::ConstantInt* getJByte(int8_t i) {
+    return ::llvm::ConstantInt::getSigned(getJByteTy(), i);
+  }
+
+  ::llvm::ConstantInt* getJChar(int16_t i) {
+    return ::llvm::ConstantInt::getSigned(getJCharTy(), i);
+  }
+
+  ::llvm::ConstantInt* getJShort(int16_t i) {
+    return ::llvm::ConstantInt::getSigned(getJShortTy(), i);
+  }
+
+  ::llvm::ConstantInt* getJInt(int32_t i) {
+    return ::llvm::ConstantInt::getSigned(getJIntTy(), i);
+  }
+
+  ::llvm::ConstantInt* getJLong(int64_t i) {
+    return ::llvm::ConstantInt::getSigned(getJLongTy(), i);
+  }
+
+  ::llvm::Constant* getJFloat(float f) {
+    return ::llvm::ConstantFP::get(getJFloatTy(), f);
+  }
+
+  ::llvm::Constant* getJDouble(double d) {
+    return ::llvm::ConstantFP::get(getJDoubleTy(), d);
+  }
+
+  ::llvm::ConstantPointerNull* getJNull() {
+    return ::llvm::ConstantPointerNull::get(getJObjectTy());
+  }
+
+  ::llvm::Constant* getJZero(char shorty_jty) {
+    return getJZero(GetJTypeFromShorty(shorty_jty));
+  }
+
+  ::llvm::Constant* getJZero(JType jty) {
+    switch (jty) {
+    case kVoid:
+      LOG(FATAL) << "Zero is not a value of void type";
+      return NULL;
+
+    case kBoolean:
+      return getJBoolean(false);
+
+    case kByte:
+      return getJByte(0);
+
+    case kChar:
+      return getJChar(0);
+
+    case kShort:
+      return getJShort(0);
+
+    case kInt:
+      return getJInt(0);
+
+    case kLong:
+      return getJLong(0);
+
+    case kFloat:
+      return getJFloat(0.0f);
+
+    case kDouble:
+      return getJDouble(0.0);
+
+    case kObject:
+      return getJNull();
+
+    default:
+      LOG(FATAL) << "Unknown java type: " << jty;
+      return NULL;
+    }
+  }
+
+
+ private:
+  ::llvm::Module* module_;
+
+  MDBuilder mdb_;
+
+  ::llvm::PointerType* java_object_type_;
+  ::llvm::PointerType* java_method_type_;
+  ::llvm::PointerType* java_thread_type_;
+
+  ::llvm::PointerType* jenv_type_;
+
+  ::llvm::StructType* art_frame_type_;
+
+  RuntimeSupportBuilder* runtime_support_;
+
+  IntrinsicHelper& intrinsic_helper_;
+};
+
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_IR_BUILDER_H_
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
new file mode 100644
index 0000000..dfb5724
--- /dev/null
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO: TargetLibraryInfo is included before sys/... because on Android bionic does #define tricks like:
+//
+// #define  stat64    stat
+// #define  fstat64   fstat
+// #define  lstat64   lstat
+// 
+// which causes grief. bionic probably should not do that.
+#include <llvm/Target/TargetLibraryInfo.h>
+
+#include "llvm_compilation_unit.h"
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <string>
+
+#include <llvm/ADT/OwningPtr.h>
+#include <llvm/ADT/StringSet.h>
+#include <llvm/ADT/Triple.h>
+#include <llvm/Analysis/CallGraph.h>
+#include <llvm/Analysis/CallGraphSCCPass.h>
+#include <llvm/Analysis/Dominators.h>
+#include <llvm/Analysis/LoopInfo.h>
+#include <llvm/Analysis/LoopPass.h>
+#include <llvm/Analysis/RegionPass.h>
+#include <llvm/Analysis/ScalarEvolution.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Assembly/PrintModulePass.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/CodeGen/MachineFrameInfo.h>
+#include <llvm/CodeGen/MachineFunction.h>
+#include <llvm/CodeGen/MachineFunctionPass.h>
+#include <llvm/DebugInfo.h>
+#include <llvm/IR/DataLayout.h>
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/LLVMContext.h>
+#include <llvm/IR/Module.h>
+#include <llvm/Object/ObjectFile.h>
+#include <llvm/PassManager.h>
+#include <llvm/Support/Debug.h>
+#include <llvm/Support/ELF.h>
+#include <llvm/Support/FormattedStream.h>
+#include <llvm/Support/ManagedStatic.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/Support/PassNameParser.h>
+#include <llvm/Support/PluginLoader.h>
+#include <llvm/Support/PrettyStackTrace.h>
+#include <llvm/Support/Signals.h>
+#include <llvm/Support/SystemUtils.h>
+#include <llvm/Support/TargetRegistry.h>
+#include <llvm/Support/TargetSelect.h>
+#include <llvm/Support/ToolOutputFile.h>
+#include <llvm/Support/raw_ostream.h>
+#include <llvm/Support/system_error.h>
+#include <llvm/Target/TargetMachine.h>
+#include <llvm/Transforms/IPO.h>
+#include <llvm/Transforms/IPO/PassManagerBuilder.h>
+#include <llvm/Transforms/Scalar.h>
+
+#include "base/logging.h"
+#include "base/unix_file/fd_file.h"
+#include "compiled_method.h"
+#include "compiler_llvm.h"
+#include "instruction_set.h"
+#include "ir_builder.h"
+#include "os.h"
+#include "runtime_support_builder_arm.h"
+#include "runtime_support_builder_thumb2.h"
+#include "runtime_support_builder_x86.h"
+#include "utils_llvm.h"
+
+namespace art {
+namespace llvm {
+
+::llvm::FunctionPass*
+CreateGBCExpanderPass(const IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
+                      CompilerDriver* compiler, const DexCompilationUnit* dex_compilation_unit);
+
+::llvm::Module* makeLLVMModuleContents(::llvm::Module* module);
+
+
+LlvmCompilationUnit::LlvmCompilationUnit(const CompilerLLVM* compiler_llvm, size_t cunit_id)
+    : compiler_llvm_(compiler_llvm), cunit_id_(cunit_id) {
+  driver_ = NULL;
+  dex_compilation_unit_ = NULL;
+  llvm_info_.reset(new LLVMInfo());
+  context_.reset(llvm_info_->GetLLVMContext());
+  module_ = llvm_info_->GetLLVMModule();
+
+  // Include the runtime function declaration
+  makeLLVMModuleContents(module_);
+
+
+  intrinsic_helper_.reset(new IntrinsicHelper(*context_, *module_));
+
+  // Create IRBuilder
+  irb_.reset(new IRBuilder(*context_, *module_, *intrinsic_helper_));
+
+  // We always need a switch case, so just use a normal function.
+  switch(GetInstructionSet()) {
+  default:
+    runtime_support_.reset(new RuntimeSupportBuilder(*context_, *module_, *irb_));
+    break;
+  case kArm:
+    runtime_support_.reset(new RuntimeSupportBuilderARM(*context_, *module_, *irb_));
+    break;
+  case kThumb2:
+    runtime_support_.reset(new RuntimeSupportBuilderThumb2(*context_, *module_, *irb_));
+    break;
+  case kX86:
+    runtime_support_.reset(new RuntimeSupportBuilderX86(*context_, *module_, *irb_));
+    break;
+  }
+
+  irb_->SetRuntimeSupport(runtime_support_.get());
+}
+
+
+LlvmCompilationUnit::~LlvmCompilationUnit() {
+  ::llvm::LLVMContext* llvm_context = context_.release(); // Managed by llvm_info_
+  CHECK(llvm_context != NULL);
+}
+
+
+InstructionSet LlvmCompilationUnit::GetInstructionSet() const {
+  return compiler_llvm_->GetInstructionSet();
+}
+
+
+static std::string DumpDirectory() {
+  if (kIsTargetBuild) {
+    return GetDalvikCacheOrDie(GetAndroidData());
+  }
+  return "/tmp";
+}
+
+void LlvmCompilationUnit::DumpBitcodeToFile() {
+  std::string bitcode;
+  DumpBitcodeToString(bitcode);
+  std::string filename(StringPrintf("%s/Art%u.bc", DumpDirectory().c_str(), cunit_id_));
+  UniquePtr<File> output(OS::OpenFile(filename.c_str(), true));
+  output->WriteFully(bitcode.data(), bitcode.size());
+  LOG(INFO) << ".bc file written successfully: " << filename;
+}
+
+void LlvmCompilationUnit::DumpBitcodeToString(std::string& str_buffer) {
+  ::llvm::raw_string_ostream str_os(str_buffer);
+  ::llvm::WriteBitcodeToFile(module_, str_os);
+}
+
+bool LlvmCompilationUnit::Materialize() {
+
+  const bool kDumpBitcode = false;
+  if (kDumpBitcode) {
+    // Dump the bitcode for debugging
+    DumpBitcodeToFile();
+  }
+
+  // Compile and prelink ::llvm::Module
+  if (!MaterializeToString(elf_object_)) {
+    LOG(ERROR) << "Failed to materialize compilation unit " << cunit_id_;
+    return false;
+  }
+
+  const bool kDumpELF = false;
+  if (kDumpELF) {
+    // Dump the ELF image for debugging
+    std::string filename(StringPrintf("%s/Art%u.o", DumpDirectory().c_str(), cunit_id_));
+    UniquePtr<File> output(OS::OpenFile(filename.c_str(), true));
+    output->WriteFully(elf_object_.data(), elf_object_.size());
+    LOG(INFO) << ".o file written successfully: " << filename;
+  }
+
+  return true;
+}
+
+
+bool LlvmCompilationUnit::MaterializeToString(std::string& str_buffer) {
+  ::llvm::raw_string_ostream str_os(str_buffer);
+  return MaterializeToRawOStream(str_os);
+}
+
+
+bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_stream) {
+  // Lookup the LLVM target
+  std::string target_triple;
+  std::string target_cpu;
+  std::string target_attr;
+  CompilerDriver::InstructionSetToLLVMTarget(GetInstructionSet(), target_triple, target_cpu, target_attr);
+
+  std::string errmsg;
+  const ::llvm::Target* target =
+    ::llvm::TargetRegistry::lookupTarget(target_triple, errmsg);
+
+  CHECK(target != NULL) << errmsg;
+
+  // Target options
+  ::llvm::TargetOptions target_options;
+  target_options.FloatABIType = ::llvm::FloatABI::Soft;
+  target_options.NoFramePointerElim = true;
+  target_options.NoFramePointerElimNonLeaf = true;
+  target_options.UseSoftFloat = false;
+  target_options.EnableFastISel = false;
+
+  // Create the ::llvm::TargetMachine
+  ::llvm::OwningPtr< ::llvm::TargetMachine> target_machine(
+    target->createTargetMachine(target_triple, target_cpu, target_attr, target_options,
+                                ::llvm::Reloc::Static, ::llvm::CodeModel::Small,
+                                ::llvm::CodeGenOpt::Aggressive));
+
+  CHECK(target_machine.get() != NULL) << "Failed to create target machine";
+
+  // Add target data
+  const ::llvm::DataLayout* data_layout = target_machine->getDataLayout();
+
+  // PassManager for code generation passes
+  ::llvm::PassManager pm;
+  pm.add(new ::llvm::DataLayout(*data_layout));
+
+  // FunctionPassManager for optimization pass
+  ::llvm::FunctionPassManager fpm(module_);
+  fpm.add(new ::llvm::DataLayout(*data_layout));
+
+  if (bitcode_filename_.empty()) {
+    // If we don't need to write the bitcode to a file, add the GBC expander
+    // pass to the regular FunctionPassManager.
+    fpm.add(CreateGBCExpanderPass(*llvm_info_->GetIntrinsicHelper(), *irb_.get(),
+                                  driver_, dex_compilation_unit_));
+  } else {
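+    // A bitcode filename was provided: run the GBC expander in a separate FunctionPassManager
+    // first, so the bitcode written to the file already reflects the expanded IR.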
+    ::llvm::FunctionPassManager fpm2(module_);
+    fpm2.add(CreateGBCExpanderPass(*llvm_info_->GetIntrinsicHelper(), *irb_.get(),
+                                   driver_, dex_compilation_unit_));
+    fpm2.doInitialization();
+    for (::llvm::Module::iterator F = module_->begin(), E = module_->end();
+         F != E; ++F) {
+      fpm2.run(*F);
+    }
+    fpm2.doFinalization();
+
+    // Write bitcode to file
+    std::string errmsg;
+
+    ::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
+      new ::llvm::tool_output_file(bitcode_filename_.c_str(), errmsg,
+                                 ::llvm::raw_fd_ostream::F_Binary));
+
+    if (!errmsg.empty()) {
+      LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
+      return false;
+    }
+
+    ::llvm::WriteBitcodeToFile(module_, out_file->os());
+    out_file->keep();
+  }
+
+  // Add optimization pass
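+  // (per-function only: interprocedural/unit-at-a-time passes and libcall simplification are
+  // disabled, and inlining stays off until IPO is supported).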
+  ::llvm::PassManagerBuilder pm_builder;
+  // TODO: Use inliner after we can do IPO.
+  pm_builder.Inliner = NULL;
+  //pm_builder.Inliner = ::llvm::createFunctionInliningPass();
+  //pm_builder.Inliner = ::llvm::createAlwaysInlinerPass();
+  //pm_builder.Inliner = ::llvm::createPartialInliningPass();
+  pm_builder.OptLevel = 3;
+  pm_builder.DisableSimplifyLibCalls = 1;
+  pm_builder.DisableUnitAtATime = 1;
+  pm_builder.populateFunctionPassManager(fpm);
+  pm_builder.populateModulePassManager(pm);
+  pm.add(::llvm::createStripDeadPrototypesPass());
+
+  // Add passes to emit ELF image
+  {
+    ::llvm::formatted_raw_ostream formatted_os(out_stream, false);
+
+    // Ask the target to add backend passes as necessary.
+    if (target_machine->addPassesToEmitFile(pm,
+                                            formatted_os,
+                                            ::llvm::TargetMachine::CGFT_ObjectFile,
+                                            true)) {
+      LOG(FATAL) << "Unable to generate ELF for this target";
+      return false;
+    }
+
+    // Run the per-function optimization
+    fpm.doInitialization();
+    for (::llvm::Module::iterator F = module_->begin(), E = module_->end();
+         F != E; ++F) {
+      fpm.run(*F);
+    }
+    fpm.doFinalization();
+
+    // Run the code generation passes
+    pm.run(*module_);
+  }
+
+  return true;
+}
+
+// Check whether the alignment is less than or equal to the code alignment of
+// the target architecture.  Since the Oat writer only guarantees that the
+// compiled method is aligned to kArchAlignment, we have no way to align the
+// ELF section if the section alignment is greater than kArchAlignment.
+void LlvmCompilationUnit::CheckCodeAlign(uint32_t align) const {
+  InstructionSet insn_set = GetInstructionSet();
+  switch (insn_set) {
+  case kThumb2:
+  case kArm:
+    CHECK_LE(align, static_cast<uint32_t>(kArmAlignment));
+    break;
+
+  case kX86:
+    CHECK_LE(align, static_cast<uint32_t>(kX86Alignment));
+    break;
+
+  case kMips:
+    CHECK_LE(align, static_cast<uint32_t>(kMipsAlignment));
+    break;
+
+  default:
+    LOG(FATAL) << "Unknown instruction set: " << insn_set;
+  }
+}
+
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/llvm_compilation_unit.h b/compiler/llvm/llvm_compilation_unit.h
new file mode 100644
index 0000000..a4f0adb
--- /dev/null
+++ b/compiler/llvm/llvm_compilation_unit.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_LLVM_COMPILATION_UNIT_H_
+#define ART_SRC_COMPILER_LLVM_LLVM_COMPILATION_UNIT_H_
+
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "dex/compiler_internals.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "globals.h"
+#include "instruction_set.h"
+#include "runtime_support_builder.h"
+#include "runtime_support_llvm_func.h"
+#include "safe_map.h"
+
+#include <UniquePtr.h>
+#include <string>
+#include <vector>
+
+namespace art {
+  class CompiledMethod;
+}
+
+namespace llvm {
+  class Function;
+  class LLVMContext;
+  class Module;
+  class raw_ostream;
+}
+
+namespace art {
+namespace llvm {
+
+class CompilerLLVM;
+class IRBuilder;
+
+class LlvmCompilationUnit {
+ public:
+  ~LlvmCompilationUnit();
+
+  uint32_t GetCompilationUnitId() const {
+    return cunit_id_;
+  }
+
+  InstructionSet GetInstructionSet() const;
+
+  ::llvm::LLVMContext* GetLLVMContext() const {
+    return context_.get();
+  }
+
+  ::llvm::Module* GetModule() const {
+    return module_;
+  }
+
+  IRBuilder* GetIRBuilder() const {
+    return irb_.get();
+  }
+
+  void SetBitcodeFileName(const std::string& bitcode_filename) {
+    bitcode_filename_ = bitcode_filename;
+  }
+
+  LLVMInfo* GetQuickContext() const {
+    return llvm_info_.get();
+  }
+  void SetCompilerDriver(CompilerDriver* driver) {
+    driver_ = driver;
+  }
+  DexCompilationUnit* GetDexCompilationUnit() {
+    return dex_compilation_unit_;
+  }
+  void SetDexCompilationUnit(DexCompilationUnit* dex_compilation_unit) {
+    dex_compilation_unit_ = dex_compilation_unit;
+  }
+
+  bool Materialize();
+
+  bool IsMaterialized() const {
+    return !elf_object_.empty();
+  }
+
+  const std::string& GetElfObject() const {
+    DCHECK(IsMaterialized());
+    return elf_object_;
+  }
+
+ private:
+  LlvmCompilationUnit(const CompilerLLVM* compiler_llvm,
+                      uint32_t cunit_id);
+
+  const CompilerLLVM* compiler_llvm_;
+  const uint32_t cunit_id_;
+
+  UniquePtr< ::llvm::LLVMContext> context_;
+  UniquePtr<IRBuilder> irb_;
+  UniquePtr<RuntimeSupportBuilder> runtime_support_;
+  ::llvm::Module* module_; // Managed by context_
+  UniquePtr<IntrinsicHelper> intrinsic_helper_;
+  UniquePtr<LLVMInfo> llvm_info_;
+  CompilerDriver* driver_;
+  DexCompilationUnit* dex_compilation_unit_;
+
+  std::string bitcode_filename_;
+
+  std::string elf_object_;
+
+  SafeMap<const ::llvm::Function*, CompiledMethod*> compiled_methods_map_;
+
+  void CheckCodeAlign(uint32_t offset) const;
+
+  void DumpBitcodeToFile();
+  void DumpBitcodeToString(std::string& str_buffer);
+
+  bool MaterializeToString(std::string& str_buffer);
+  bool MaterializeToRawOStream(::llvm::raw_ostream& out_stream);
+
+  friend class CompilerLLVM;  // For LlvmCompilationUnit constructor
+};
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_LLVM_COMPILATION_UNIT_H_
diff --git a/compiler/llvm/md_builder.cc b/compiler/llvm/md_builder.cc
new file mode 100644
index 0000000..3884f51
--- /dev/null
+++ b/compiler/llvm/md_builder.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "md_builder.h"
+
+#include "llvm/IR/MDBuilder.h"
+
+#include <string>
+
+namespace art {
+namespace llvm {
+
+
+::llvm::MDNode* MDBuilder::GetTBAASpecialType(TBAASpecialType sty_id) {
+  DCHECK_GE(sty_id, 0) << "Unknown TBAA special type: " << sty_id;
+  DCHECK_LT(sty_id, MAX_TBAA_SPECIAL_TYPE) << "Unknown TBAA special type: " << sty_id;
+  DCHECK(tbaa_root_ != NULL);
+
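+  // Lazily create one TBAA metadata node per special type, each rooted (directly or via
+  // kTBAAJRuntime) at the ART TBAA root, and cache it for reuse.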
+  ::llvm::MDNode*& spec_ty = tbaa_special_type_[sty_id];
+  if (spec_ty == NULL) {
+    switch (sty_id) {
+    case kTBAARegister:     spec_ty = createTBAANode("Register", tbaa_root_); break;
+    case kTBAAStackTemp:    spec_ty = createTBAANode("StackTemp", tbaa_root_); break;
+    case kTBAAHeapArray:    spec_ty = createTBAANode("HeapArray", tbaa_root_); break;
+    case kTBAAHeapInstance: spec_ty = createTBAANode("HeapInstance", tbaa_root_); break;
+    case kTBAAHeapStatic:   spec_ty = createTBAANode("HeapStatic", tbaa_root_); break;
+    case kTBAAJRuntime:     spec_ty = createTBAANode("JRuntime", tbaa_root_); break;
+    case kTBAARuntimeInfo:  spec_ty = createTBAANode("RuntimeInfo",
+                                                     GetTBAASpecialType(kTBAAJRuntime)); break;
+    case kTBAAShadowFrame:  spec_ty = createTBAANode("ShadowFrame",
+                                                     GetTBAASpecialType(kTBAAJRuntime)); break;
+    case kTBAAConstJObject: spec_ty = createTBAANode("ConstJObject", tbaa_root_, true); break;
+    default:
+      LOG(FATAL) << "Unknown TBAA special type: " << sty_id;
+      break;
+    }
+  }
+  return spec_ty;
+}
+
+::llvm::MDNode* MDBuilder::GetTBAAMemoryJType(TBAASpecialType sty_id, JType jty_id) {
+  DCHECK(sty_id == kTBAAHeapArray ||
+         sty_id == kTBAAHeapInstance ||
+         sty_id == kTBAAHeapStatic) << "SpecialType must be array, instance, or static";
+
+  DCHECK_GE(jty_id, 0) << "Unknown JType: " << jty_id;
+  DCHECK_LT(jty_id, MAX_JTYPE) << "Unknown JType: " << jty_id;
+  DCHECK_NE(jty_id, kVoid) << "Can't load/store Void type!";
+
+  std::string name;
+  size_t sty_mapped_index = 0;
+  switch (sty_id) {
+  case kTBAAHeapArray:    sty_mapped_index = 0; name = "HeapArray "; break;
+  case kTBAAHeapInstance: sty_mapped_index = 1; name = "HeapInstance "; break;
+  case kTBAAHeapStatic:   sty_mapped_index = 2; name = "HeapStatic "; break;
+  default:
+    LOG(FATAL) << "Unknown TBAA special type: " << sty_id;
+    break;
+  }
+
+  ::llvm::MDNode*& spec_ty = tbaa_memory_jtype_[sty_mapped_index][jty_id];
+  if (spec_ty != NULL) {
+    return spec_ty;
+  }
+
+  switch (jty_id) {
+  case kBoolean: name += "Boolean"; break;
+  case kByte:    name += "Byte"; break;
+  case kChar:    name += "Char"; break;
+  case kShort:   name += "Short"; break;
+  case kInt:     name += "Int"; break;
+  case kLong:    name += "Long"; break;
+  case kFloat:   name += "Float"; break;
+  case kDouble:  name += "Double"; break;
+  case kObject:  name += "Object"; break;
+  default:
+    LOG(FATAL) << "Unknown JType: " << jty_id;
+    break;
+  }
+
+  spec_ty = createTBAANode(name, GetTBAASpecialType(sty_id));
+  return spec_ty;
+}
+
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/md_builder.h b/compiler/llvm/md_builder.h
new file mode 100644
index 0000000..79a7caa
--- /dev/null
+++ b/compiler/llvm/md_builder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_MD_BUILDER_H_
+#define ART_SRC_COMPILER_LLVM_MD_BUILDER_H_
+
+#include "backend_types.h"
+
+#include "llvm/IR/MDBuilder.h"
+
+#include <cstring>
+
+namespace llvm {
+  class LLVMContext;
+  class MDNode;
+}
+
+namespace art {
+namespace llvm {
+
+typedef ::llvm::MDBuilder LLVMMDBuilder;
+
+class MDBuilder : public LLVMMDBuilder {
+ public:
+  MDBuilder(::llvm::LLVMContext& context)
+     : LLVMMDBuilder(context), tbaa_root_(createTBAARoot("Art TBAA Root")) {
+    std::memset(tbaa_special_type_, 0, sizeof(tbaa_special_type_));
+    std::memset(tbaa_memory_jtype_, 0, sizeof(tbaa_memory_jtype_));
+
+    // Pre-generate the MDNodes for static branch prediction.
+    // 64 and 4 are llvm.expect's default likely/unlikely branch weights.
+    expect_cond_[kLikely] = createBranchWeights(64, 4);
+    expect_cond_[kUnlikely] = createBranchWeights(4, 64);
+  }
+
+  ::llvm::MDNode* GetTBAASpecialType(TBAASpecialType special_ty);
+  ::llvm::MDNode* GetTBAAMemoryJType(TBAASpecialType special_ty, JType j_ty);
+
+  ::llvm::MDNode* GetBranchWeights(ExpectCond expect) {
+    DCHECK_LT(expect, MAX_EXPECT) << "MAX_EXPECT is not for branch weight";
+    return expect_cond_[expect];
+  }
+
+ private:
+  ::llvm::MDNode* const tbaa_root_;
+  ::llvm::MDNode* tbaa_special_type_[MAX_TBAA_SPECIAL_TYPE];
+  // There are 3 categories of memory types that never alias one another: array element,
+  // instance field, and static field.
+  ::llvm::MDNode* tbaa_memory_jtype_[3][MAX_JTYPE];
+
+  ::llvm::MDNode* expect_cond_[MAX_EXPECT];
+};
+
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_MD_BUILDER_H_
diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc
new file mode 100644
index 0000000..28405f6
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder.cc
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_support_builder.h"
+
+#include "gc/accounting/card_table.h"
+#include "ir_builder.h"
+#include "monitor.h"
+#include "mirror/object.h"
+#include "thread.h"
+
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Type.h>
+
+using namespace llvm;
+
+namespace art {
+namespace llvm {
+
+using namespace runtime_support;
+
+
+RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context,
+                                             ::llvm::Module& module,
+                                             IRBuilder& irb)
+    : context_(context), module_(module), irb_(irb) {
+  memset(target_runtime_support_func_, 0, sizeof(target_runtime_support_func_));
+#define GET_RUNTIME_SUPPORT_FUNC_DECL(ID, NAME) \
+  do { \
+    ::llvm::Function* fn = module_.getFunction(#NAME); \
+    DCHECK_NE(fn, (void*)NULL) << "Function not found: " << #NAME; \
+    runtime_support_func_decls_[runtime_support::ID] = fn; \
+  } while (0);
+
+#include "runtime_support_llvm_func_list.h"
+  RUNTIME_SUPPORT_FUNC_LIST(GET_RUNTIME_SUPPORT_FUNC_DECL)
+#undef RUNTIME_SUPPORT_FUNC_LIST
+#undef GET_RUNTIME_SUPPORT_FUNC_DECL
+}
+
+
+/* Thread */
+
+::llvm::Value* RuntimeSupportBuilder::EmitGetCurrentThread() {
+  Function* func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
+  CallInst* call_inst = irb_.CreateCall(func);
+  call_inst->setOnlyReadsMemory();
+  irb_.SetTBAA(call_inst, kTBAAConstJObject);
+  return call_inst;
+}
+
+::llvm::Value* RuntimeSupportBuilder::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                             TBAASpecialType s_ty) {
+  Value* thread = EmitGetCurrentThread();
+  return irb_.LoadFromObjectOffset(thread, offset, type, s_ty);
+}
+
+void RuntimeSupportBuilder::EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+                                                    TBAASpecialType s_ty) {
+  Value* thread = EmitGetCurrentThread();
+  irb_.StoreToObjectOffset(thread, offset, value, s_ty);
+}
+
+::llvm::Value* RuntimeSupportBuilder::EmitSetCurrentThread(::llvm::Value* thread) {
+  Function* func = GetRuntimeSupportFunction(runtime_support::SetCurrentThread);
+  return irb_.CreateCall(func, thread);
+}
+
+
+/* ShadowFrame */
+
+::llvm::Value* RuntimeSupportBuilder::EmitPushShadowFrame(::llvm::Value* new_shadow_frame,
+                                                        ::llvm::Value* method,
+                                                        uint32_t num_vregs) {
+  Value* old_shadow_frame = EmitLoadFromThreadOffset(Thread::TopShadowFrameOffset().Int32Value(),
+                                                     irb_.getArtFrameTy()->getPointerTo(),
+                                                     kTBAARuntimeInfo);
+  EmitStoreToThreadOffset(Thread::TopShadowFrameOffset().Int32Value(),
+                          new_shadow_frame,
+                          kTBAARuntimeInfo);
+
+  // Store the method pointer
+  irb_.StoreToObjectOffset(new_shadow_frame,
+                           ShadowFrame::MethodOffset(),
+                           method,
+                           kTBAAShadowFrame);
+
+  // Store the number of vregs
+  irb_.StoreToObjectOffset(new_shadow_frame,
+                           ShadowFrame::NumberOfVRegsOffset(),
+                           irb_.getInt32(num_vregs),
+                           kTBAAShadowFrame);
+
+  // Store the link to previous shadow frame
+  irb_.StoreToObjectOffset(new_shadow_frame,
+                           ShadowFrame::LinkOffset(),
+                           old_shadow_frame,
+                           kTBAAShadowFrame);
+
+  return old_shadow_frame;
+}
+
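+// Out-of-line variant of EmitPushShadowFrame: delegate the push to the PushShadowFrame runtime
+// helper instead of emitting the individual stores inline.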
+::llvm::Value*
+RuntimeSupportBuilder::EmitPushShadowFrameNoInline(::llvm::Value* new_shadow_frame,
+                                                   ::llvm::Value* method,
+                                                   uint32_t num_vregs) {
+  Function* func = GetRuntimeSupportFunction(runtime_support::PushShadowFrame);
+  ::llvm::CallInst* call_inst =
+      irb_.CreateCall4(func,
+                       EmitGetCurrentThread(),
+                       new_shadow_frame,
+                       method,
+                       irb_.getInt32(num_vregs));
+  irb_.SetTBAA(call_inst, kTBAARuntimeInfo);
+  return call_inst;
+}
+
+void RuntimeSupportBuilder::EmitPopShadowFrame(::llvm::Value* old_shadow_frame) {
+  // Store old shadow frame to TopShadowFrame
+  EmitStoreToThreadOffset(Thread::TopShadowFrameOffset().Int32Value(),
+                          old_shadow_frame,
+                          kTBAARuntimeInfo);
+}
+
+
+/* Exception */
+
+::llvm::Value* RuntimeSupportBuilder::EmitGetAndClearException() {
+  Function* slow_func = GetRuntimeSupportFunction(runtime_support::GetAndClearException);
+  return irb_.CreateCall(slow_func, EmitGetCurrentThread());
+}
+
+::llvm::Value* RuntimeSupportBuilder::EmitIsExceptionPending() {
+  Value* exception = EmitLoadFromThreadOffset(Thread::ExceptionOffset().Int32Value(),
+                                              irb_.getJObjectTy(),
+                                              kTBAARuntimeInfo);
+  // If exception not null
+  return irb_.CreateIsNotNull(exception);
+}
+
+
+/* Suspend */
+
+void RuntimeSupportBuilder::EmitTestSuspend() {
+  Function* slow_func = GetRuntimeSupportFunction(runtime_support::TestSuspend);
+  CallInst* call_inst = irb_.CreateCall(slow_func, EmitGetCurrentThread());
+  irb_.SetTBAA(call_inst, kTBAAJRuntime);
+}
+
+
+/* Monitor */
+
+void RuntimeSupportBuilder::EmitLockObject(::llvm::Value* object) {
+  Value* monitor =
+      irb_.LoadFromObjectOffset(object,
+                                mirror::Object::MonitorOffset().Int32Value(),
+                                irb_.getJIntTy(),
+                                kTBAARuntimeInfo);
+
+  Value* real_monitor =
+      irb_.CreateAnd(monitor, ~(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
+
+  // The monitor is a thin lock that is unheld and not recursively acquired.
+  Value* unheld = irb_.CreateICmpEQ(real_monitor, irb_.getInt32(0));
+
+  Function* parent_func = irb_.GetInsertBlock()->getParent();
+  BasicBlock* bb_fast = BasicBlock::Create(context_, "lock_fast", parent_func);
+  BasicBlock* bb_slow = BasicBlock::Create(context_, "lock_slow", parent_func);
+  BasicBlock* bb_cont = BasicBlock::Create(context_, "lock_cont", parent_func);
+  irb_.CreateCondBr(unheld, bb_fast, bb_slow, kLikely);
+
+  irb_.SetInsertPoint(bb_fast);
+
+  // Calculate new monitor: new = old | (lock_id << LW_LOCK_OWNER_SHIFT)
+  Value* lock_id =
+      EmitLoadFromThreadOffset(Thread::ThinLockIdOffset().Int32Value(),
+                               irb_.getInt32Ty(), kTBAARuntimeInfo);
+
+  Value* owner = irb_.CreateShl(lock_id, LW_LOCK_OWNER_SHIFT);
+  Value* new_monitor = irb_.CreateOr(monitor, owner);
+
+  // Atomically update monitor.
+  Value* old_monitor =
+      irb_.CompareExchangeObjectOffset(object,
+                                       mirror::Object::MonitorOffset().Int32Value(),
+                                       monitor, new_monitor, kTBAARuntimeInfo);
+
+  Value* retry_slow_path = irb_.CreateICmpEQ(old_monitor, monitor);
+  irb_.CreateCondBr(retry_slow_path, bb_cont, bb_slow, kLikely);
+
+  irb_.SetInsertPoint(bb_slow);
+  Function* slow_func = GetRuntimeSupportFunction(runtime_support::LockObject);
+  irb_.CreateCall2(slow_func, object, EmitGetCurrentThread());
+  irb_.CreateBr(bb_cont);
+
+  irb_.SetInsertPoint(bb_cont);
+}
+
+void RuntimeSupportBuilder::EmitUnlockObject(::llvm::Value* object) {
+  Value* lock_id =
+      EmitLoadFromThreadOffset(Thread::ThinLockIdOffset().Int32Value(),
+                               irb_.getJIntTy(),
+                               kTBAARuntimeInfo);
+  Value* monitor =
+      irb_.LoadFromObjectOffset(object,
+                                mirror::Object::MonitorOffset().Int32Value(),
+                                irb_.getJIntTy(),
+                                kTBAARuntimeInfo);
+
+  Value* my_monitor = irb_.CreateShl(lock_id, LW_LOCK_OWNER_SHIFT);
+  Value* hash_state = irb_.CreateAnd(monitor, (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
+  Value* real_monitor = irb_.CreateAnd(monitor, ~(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
+
+  // The monitor is a thin lock held by us and not recursively acquired.
+  Value* is_fast_path = irb_.CreateICmpEQ(real_monitor, my_monitor);
+
+  Function* parent_func = irb_.GetInsertBlock()->getParent();
+  BasicBlock* bb_fast = BasicBlock::Create(context_, "unlock_fast", parent_func);
+  BasicBlock* bb_slow = BasicBlock::Create(context_, "unlock_slow", parent_func);
+  BasicBlock* bb_cont = BasicBlock::Create(context_, "unlock_cont", parent_func);
+  irb_.CreateCondBr(is_fast_path, bb_fast, bb_slow, kLikely);
+
+  irb_.SetInsertPoint(bb_fast);
+  // Set all bits to zero (except hash state)
+  irb_.StoreToObjectOffset(object,
+                           mirror::Object::MonitorOffset().Int32Value(),
+                           hash_state,
+                           kTBAARuntimeInfo);
+  irb_.CreateBr(bb_cont);
+
+  irb_.SetInsertPoint(bb_slow);
+  Function* slow_func = GetRuntimeSupportFunction(runtime_support::UnlockObject);
+  irb_.CreateCall2(slow_func, object, EmitGetCurrentThread());
+  irb_.CreateBr(bb_cont);
+
+  irb_.SetInsertPoint(bb_cont);
+}
+
+
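+// If the stored value is a non-null reference, mark the card covering target_addr as dirty so
+// the garbage collector can find the cross-reference (see gc::accounting::CardTable).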
+void RuntimeSupportBuilder::EmitMarkGCCard(::llvm::Value* value, ::llvm::Value* target_addr) {
+  Function* parent_func = irb_.GetInsertBlock()->getParent();
+  BasicBlock* bb_mark_gc_card = BasicBlock::Create(context_, "mark_gc_card", parent_func);
+  BasicBlock* bb_cont = BasicBlock::Create(context_, "mark_gc_card_cont", parent_func);
+
+  ::llvm::Value* not_null = irb_.CreateIsNotNull(value);
+  irb_.CreateCondBr(not_null, bb_mark_gc_card, bb_cont);
+
+  irb_.SetInsertPoint(bb_mark_gc_card);
+  Value* card_table = EmitLoadFromThreadOffset(Thread::CardTableOffset().Int32Value(),
+                                               irb_.getInt8Ty()->getPointerTo(),
+                                               kTBAAConstJObject);
+  Value* target_addr_int = irb_.CreatePtrToInt(target_addr, irb_.getPtrEquivIntTy());
+  Value* card_no = irb_.CreateLShr(target_addr_int,
+                                   irb_.getPtrEquivInt(gc::accounting::CardTable::kCardShift));
+  Value* card_table_entry = irb_.CreateGEP(card_table, card_no);
+  irb_.CreateStore(irb_.getInt8(gc::accounting::CardTable::kCardDirty), card_table_entry,
+                   kTBAARuntimeInfo);
+  irb_.CreateBr(bb_cont);
+
+  irb_.SetInsertPoint(bb_cont);
+}
+
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/runtime_support_builder.h b/compiler/llvm/runtime_support_builder.h
new file mode 100644
index 0000000..267b406
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_H_
+#define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_H_
+
+#include "backend_types.h"
+#include "base/logging.h"
+#include "runtime_support_llvm_func.h"
+
+#include <stdint.h>
+
+namespace llvm {
+  class LLVMContext;
+  class Module;
+  class Function;
+  class Type;
+  class Value;
+}
+
+namespace art {
+namespace llvm {
+
+class IRBuilder;
+
+
+class RuntimeSupportBuilder {
+ public:
+  RuntimeSupportBuilder(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb);
+
+  /* Thread */
+  virtual ::llvm::Value* EmitGetCurrentThread();
+  virtual ::llvm::Value* EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                TBAASpecialType s_ty);
+  virtual void EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+                                       TBAASpecialType s_ty);
+  virtual ::llvm::Value* EmitSetCurrentThread(::llvm::Value* thread);
+
+  /* ShadowFrame */
+  virtual ::llvm::Value* EmitPushShadowFrame(::llvm::Value* new_shadow_frame,
+                                           ::llvm::Value* method, uint32_t num_vregs);
+  virtual ::llvm::Value* EmitPushShadowFrameNoInline(::llvm::Value* new_shadow_frame,
+                                                   ::llvm::Value* method, uint32_t num_vregs);
+  virtual void EmitPopShadowFrame(::llvm::Value* old_shadow_frame);
+
+  /* Exception */
+  virtual ::llvm::Value* EmitGetAndClearException();
+  virtual ::llvm::Value* EmitIsExceptionPending();
+
+  /* Suspend */
+  virtual void EmitTestSuspend();
+
+  /* Monitor */
+  virtual void EmitLockObject(::llvm::Value* object);
+  virtual void EmitUnlockObject(::llvm::Value* object);
+
+  /* MarkGCCard */
+  virtual void EmitMarkGCCard(::llvm::Value* value, ::llvm::Value* target_addr);
+
+  ::llvm::Function* GetRuntimeSupportFunction(runtime_support::RuntimeId id) {
+    if (id >= 0 && id < runtime_support::MAX_ID) {
+      return runtime_support_func_decls_[id];
+    } else {
+      LOG(ERROR) << "Unknown runtime function id: " << id;
+      return NULL;
+    }
+  }
+
+  virtual ~RuntimeSupportBuilder() {}
+
+ protected:
+  ::llvm::LLVMContext& context_;
+  ::llvm::Module& module_;
+  IRBuilder& irb_;
+
+ private:
+  ::llvm::Function* runtime_support_func_decls_[runtime_support::MAX_ID];
+  bool target_runtime_support_func_[runtime_support::MAX_ID];
+};
+
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_H_
diff --git a/compiler/llvm/runtime_support_builder_arm.cc b/compiler/llvm/runtime_support_builder_arm.cc
new file mode 100644
index 0000000..57a9971
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder_arm.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_support_builder_arm.h"
+
+#include "ir_builder.h"
+#include "thread.h"
+#include "utils_llvm.h"
+
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/InlineAsm.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Type.h>
+
+#include <vector>
+
+using namespace llvm;
+
+namespace {
+
+char LDRSTRSuffixByType(art::llvm::IRBuilder& irb, ::llvm::Type* type) {
+  int width = type->isPointerTy() ?
+              irb.getSizeOfPtrEquivInt()*8 :
+              ::llvm::cast<IntegerType>(type)->getBitWidth();
+  switch (width) {
+    case 8:  return 'b';
+    case 16: return 'h';
+    case 32: return ' ';
+    default:
+      LOG(FATAL) << "Unsupported width: " << width;
+      return ' ';
+  }
+}
+
+} // namespace
+
+namespace art {
+namespace llvm {
+
+/* Thread */
+
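+// On ARM the current Thread* is kept in the dedicated register r9, so thread accesses are
+// emitted as inline assembly against r9 instead of calls into the runtime.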
+::llvm::Value* RuntimeSupportBuilderARM::EmitGetCurrentThread() {
+  Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
+  InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), "mov $0, r9", "=r", false);
+  CallInst* thread = irb_.CreateCall(func);
+  thread->setDoesNotAccessMemory();
+  irb_.SetTBAA(thread, kTBAAConstJObject);
+  return thread;
+}
+
+::llvm::Value* RuntimeSupportBuilderARM::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                                TBAASpecialType s_ty) {
+  FunctionType* func_ty = FunctionType::get(/*Result=*/type,
+                                            /*isVarArg=*/false);
+  std::string inline_asm(StringPrintf("ldr%c $0, [r9, #%d]",
+                                      LDRSTRSuffixByType(irb_, type),
+                                      static_cast<int>(offset)));
+  InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "=r", true);
+  CallInst* result = irb_.CreateCall(func);
+  result->setOnlyReadsMemory();
+  irb_.SetTBAA(result, s_ty);
+  return result;
+}
+
+void RuntimeSupportBuilderARM::EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+                                                       TBAASpecialType s_ty) {
+  FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
+                                            /*Params=*/value->getType(),
+                                            /*isVarArg=*/false);
+  std::string inline_asm(StringPrintf("str%c $0, [r9, #%d]",
+                                      LDRSTRSuffixByType(irb_, value->getType()),
+                                      static_cast<int>(offset)));
+  InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "r", true);
+  CallInst* call_inst = irb_.CreateCall(func, value);
+  irb_.SetTBAA(call_inst, s_ty);
+}
+
+::llvm::Value*
+RuntimeSupportBuilderARM::EmitSetCurrentThread(::llvm::Value* thread) {
+  // Split this into two InlineAsm statements: the first produces the return value, while the
+  // second sets the current thread.
+  // LLVM can delete the first one if the caller in LLVM IR doesn't use the return value.
+  //
+  // We don't call EmitGetCurrentThread here, because that marks the call as DoesNotAccessMemory
+  // and ConstJObject. Instead we mark both InlineAsm statements as having side effects below,
+  // so LLVM won't reorder these instructions incorrectly.
+  Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
+  InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), "mov $0, r9", "=r", true);
+  CallInst* old_thread_register = irb_.CreateCall(func);
+  old_thread_register->setOnlyReadsMemory();
+
+  FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
+                                            /*Params=*/irb_.getJObjectTy(),
+                                            /*isVarArg=*/false);
+  func = InlineAsm::get(func_ty, "mov r9, $0", "r", true);
+  irb_.CreateCall(func, thread);
+  return old_thread_register;
+}
+
+
+/* Monitor */
+
+void RuntimeSupportBuilderARM::EmitLockObject(::llvm::Value* object) {
+  RuntimeSupportBuilder::EmitLockObject(object);
+  FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
+                                            /*isVarArg=*/false);
+  InlineAsm* func = InlineAsm::get(func_ty, "dmb sy", "", true);
+  irb_.CreateCall(func);
+}
+
+void RuntimeSupportBuilderARM::EmitUnlockObject(::llvm::Value* object) {
+  RuntimeSupportBuilder::EmitUnlockObject(object);
+  FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
+                                            /*isVarArg=*/false);
+  InlineAsm* func = InlineAsm::get(func_ty, "dmb sy", "", true);
+  irb_.CreateCall(func);
+}
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/runtime_support_builder_arm.h b/compiler/llvm/runtime_support_builder_arm.h
new file mode 100644
index 0000000..3c5972f
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder_arm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_ARM_H_
+#define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_ARM_H_
+
+#include "runtime_support_builder.h"
+
+namespace art {
+namespace llvm {
+
+class RuntimeSupportBuilderARM : public RuntimeSupportBuilder {
+ public:
+  RuntimeSupportBuilderARM(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb)
+    : RuntimeSupportBuilder(context, module, irb) {}
+
+  /* Thread */
+  virtual ::llvm::Value* EmitGetCurrentThread();
+  virtual ::llvm::Value* EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                TBAASpecialType s_ty);
+  virtual void EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+                                       TBAASpecialType s_ty);
+  virtual ::llvm::Value* EmitSetCurrentThread(::llvm::Value* thread);
+
+  /* Monitor */
+  virtual void EmitLockObject(::llvm::Value* object);
+  virtual void EmitUnlockObject(::llvm::Value* object);
+};
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_ARM_H_
diff --git a/compiler/llvm/runtime_support_builder_thumb2.cc b/compiler/llvm/runtime_support_builder_thumb2.cc
new file mode 100644
index 0000000..2b9170c
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder_thumb2.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_support_builder_thumb2.h"
+
+#include "ir_builder.h"
+#include "mirror/object.h"
+#include "monitor.h"
+#include "thread.h"
+#include "utils_llvm.h"
+
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/InlineAsm.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Type.h>
+
+#include <inttypes.h>
+#include <vector>
+
+using namespace llvm;
+
+namespace art {
+namespace llvm {
+
+
+void RuntimeSupportBuilderThumb2::EmitLockObject(::llvm::Value* object) {
+  FunctionType* func_ty = FunctionType::get(/*Result=*/irb_.getInt32Ty(),
+                                            /*Params=*/irb_.getJObjectTy(),
+                                            /*isVarArg=*/false);
+  // $0: result
+  // $1: object
+  // $2: temp
+  // $3: temp
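+  //
+  // Thin-lock fast path using ldrex/strex: $0 ends up non-zero if the lock is already held or
+  // the store-exclusive fails, in which case we fall back to the LockObject slow path below.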
+  std::string asms;
+  StringAppendF(&asms, "add $3, $1, #%"PRId32"\n", mirror::Object::MonitorOffset().Int32Value());
+  StringAppendF(&asms, "ldr $2, [r9, #%"PRId32"]\n", Thread::ThinLockIdOffset().Int32Value());
+  StringAppendF(&asms, "ldrex $0, [$3]\n");
+  StringAppendF(&asms, "lsl $2, $2, %d\n", LW_LOCK_OWNER_SHIFT);
+  StringAppendF(&asms, "bfi $2, $0, #0, #%d\n", LW_LOCK_OWNER_SHIFT - 1);
+  StringAppendF(&asms, "bfc $0, #%d, #%d\n", LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+  StringAppendF(&asms, "cmp $0, #0\n");
+  StringAppendF(&asms, "it eq\n");
+  StringAppendF(&asms, "strexeq $0, $2, [$3]\n");
+
+  InlineAsm* func = InlineAsm::get(func_ty, asms, "=&l,l,~l,~l", true);
+
+  ::llvm::Value* retry_slow_path = irb_.CreateCall(func, object);
+  retry_slow_path = irb_.CreateICmpNE(retry_slow_path, irb_.getJInt(0));
+
+  ::llvm::Function* parent_func = irb_.GetInsertBlock()->getParent();
+  BasicBlock* basic_block_lock = BasicBlock::Create(context_, "lock", parent_func);
+  BasicBlock* basic_block_cont = BasicBlock::Create(context_, "lock_cont", parent_func);
+  irb_.CreateCondBr(retry_slow_path, basic_block_lock, basic_block_cont, kUnlikely);
+
+  irb_.SetInsertPoint(basic_block_lock);
+  Function* slow_func = GetRuntimeSupportFunction(runtime_support::LockObject);
+  irb_.CreateCall2(slow_func, object, EmitGetCurrentThread());
+  irb_.CreateBr(basic_block_cont);
+
+  irb_.SetInsertPoint(basic_block_cont);
+  { // Memory barrier
+    FunctionType* asm_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
+                                              /*isVarArg=*/false);
+    InlineAsm* func = InlineAsm::get(asm_ty, "dmb sy", "", true);
+    irb_.CreateCall(func);
+  }
+}
+
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/runtime_support_builder_thumb2.h b/compiler/llvm/runtime_support_builder_thumb2.h
new file mode 100644
index 0000000..4762a26
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder_thumb2.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_THUMB2_H_
+#define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_THUMB2_H_
+
+#include "runtime_support_builder_arm.h"
+
+namespace art {
+namespace llvm {
+
+class RuntimeSupportBuilderThumb2 : public RuntimeSupportBuilderARM {
+ public:
+  RuntimeSupportBuilderThumb2(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb)
+    : RuntimeSupportBuilderARM(context, module, irb) {}
+
+  /* Monitor */
+  virtual void EmitLockObject(::llvm::Value* object);
+};
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_THUMB2_H_
diff --git a/compiler/llvm/runtime_support_builder_x86.cc b/compiler/llvm/runtime_support_builder_x86.cc
new file mode 100644
index 0000000..eed0b63
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder_x86.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_support_builder_x86.h"
+
+#include "base/stringprintf.h"
+#include "ir_builder.h"
+#include "thread.h"
+#include "utils_llvm.h"
+
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/InlineAsm.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Type.h>
+
+#include <vector>
+
+using namespace llvm;
+
+namespace art {
+namespace llvm {
+
+
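+// On x86 the current Thread* is reachable through the %fs segment register, so thread accesses
+// are emitted as inline assembly loads/stores of %fs:offset.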
+::llvm::Value* RuntimeSupportBuilderX86::EmitGetCurrentThread() {
+  Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
+  std::string inline_asm(StringPrintf("mov %%fs:%d, $0", Thread::SelfOffset().Int32Value()));
+  InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), inline_asm, "=r", false);
+  CallInst* thread = irb_.CreateCall(func);
+  thread->setDoesNotAccessMemory();
+  irb_.SetTBAA(thread, kTBAAConstJObject);
+  return thread;
+}
+
+::llvm::Value* RuntimeSupportBuilderX86::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                                TBAASpecialType s_ty) {
+  FunctionType* func_ty = FunctionType::get(/*Result=*/type,
+                                            /*isVarArg=*/false);
+  std::string inline_asm(StringPrintf("mov %%fs:%d, $0", static_cast<int>(offset)));
+  InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "=r", true);
+  CallInst* result = irb_.CreateCall(func);
+  result->setOnlyReadsMemory();
+  irb_.SetTBAA(result, s_ty);
+  return result;
+}
+
+void RuntimeSupportBuilderX86::EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+                                                       TBAASpecialType s_ty) {
+  FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
+                                            /*Params=*/value->getType(),
+                                            /*isVarArg=*/false);
+  std::string inline_asm(StringPrintf("mov $0, %%fs:%d", static_cast<int>(offset)));
+  InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "r", true);
+  CallInst* call_inst = irb_.CreateCall(func, value);
+  irb_.SetTBAA(call_inst, s_ty);
+}
+
+::llvm::Value* RuntimeSupportBuilderX86::EmitSetCurrentThread(::llvm::Value*) {
+  /* Nothing to be done. */
+  return ::llvm::UndefValue::get(irb_.getJObjectTy());
+}
+
+
+} // namespace llvm
+} // namespace art
diff --git a/compiler/llvm/runtime_support_builder_x86.h b/compiler/llvm/runtime_support_builder_x86.h
new file mode 100644
index 0000000..e5fdbc2
--- /dev/null
+++ b/compiler/llvm/runtime_support_builder_x86.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_X86_H_
+#define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_X86_H_
+
+#include "runtime_support_builder.h"
+
+namespace art {
+namespace llvm {
+
+class RuntimeSupportBuilderX86 : public RuntimeSupportBuilder {
+ public:
+  RuntimeSupportBuilderX86(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb)
+    : RuntimeSupportBuilder(context, module, irb) {}
+
+  /* Thread */
+  virtual ::llvm::Value* EmitGetCurrentThread();
+  virtual ::llvm::Value* EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                TBAASpecialType s_ty);
+  virtual void EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+                                       TBAASpecialType s_ty);
+  virtual ::llvm::Value* EmitSetCurrentThread(::llvm::Value* thread);
+};
+
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_X86_H_
diff --git a/compiler/llvm/runtime_support_llvm_func.h b/compiler/llvm/runtime_support_llvm_func.h
new file mode 100644
index 0000000..ac6f3b8
--- /dev/null
+++ b/compiler/llvm/runtime_support_llvm_func.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_FUNC_H_
+#define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_FUNC_H_
+
+namespace art {
+namespace llvm {
+namespace runtime_support {
+
+  enum RuntimeId {
+#define DEFINE_RUNTIME_SUPPORT_FUNC_ID(ID, NAME) ID,
+#include "runtime_support_llvm_func_list.h"
+    RUNTIME_SUPPORT_FUNC_LIST(DEFINE_RUNTIME_SUPPORT_FUNC_ID)
+#undef RUNTIME_SUPPORT_FUNC_LIST
+#undef DEFINE_RUNTIME_SUPPORT_FUNC_ID
+
+    MAX_ID
+  };
+
+} // namespace runtime_support
+} // namespace llvm
+} // namespace art
+
+#endif // ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_FUNC_H_
diff --git a/compiler/llvm/tools/gen_art_module_cc.sh b/compiler/llvm/tools/gen_art_module_cc.sh
new file mode 100755
index 0000000..c5df333
--- /dev/null
+++ b/compiler/llvm/tools/gen_art_module_cc.sh
@@ -0,0 +1,50 @@
+#!/bin/bash -e
+
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPTDIR=`dirname "$0"`
+cd "${SCRIPTDIR}/.."
+
+mkdir -p generated
+
+OUTPUT_FILE=generated/art_module.cc
+
+echo "// Generated with ${0}" > ${OUTPUT_FILE}
+
+echo '
+
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+// TODO: Remove this pragma after llc can generate makeLLVMModuleContents()
+// with smaller frame size.
+
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Function.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Type.h>
+
+#include <vector>
+
+using namespace llvm;
+
+namespace art {
+namespace llvm {
+
+' >> ${OUTPUT_FILE}
+
+llc -march=cpp -cppgen=contents art_module.ll -o - >> ${OUTPUT_FILE}
+
+echo '
+} // namespace llvm
+} // namespace art' >> ${OUTPUT_FILE}
diff --git a/compiler/llvm/utils_llvm.h b/compiler/llvm/utils_llvm.h
new file mode 100644
index 0000000..2e273f4
--- /dev/null
+++ b/compiler/llvm/utils_llvm.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_UTILS_LLVM_H_
+#define ART_SRC_UTILS_LLVM_H_
+
+#include <llvm/Analysis/Verifier.h>
+
+namespace art {
+
+#ifndef NDEBUG
+#define VERIFY_LLVM_FUNCTION(func) ::llvm::verifyFunction(func, ::llvm::AbortProcessAction)
+#else
+#define VERIFY_LLVM_FUNCTION(func)
+#endif
+
+}  // namespace art
+
+#endif  // ART_SRC_UTILS_LLVM_H_
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
new file mode 100644
index 0000000..0bfa4ec
--- /dev/null
+++ b/compiler/oat_writer.cc
@@ -0,0 +1,906 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_writer.h"
+
+#include <zlib.h>
+
+#include "base/stl_util.h"
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "gc/space/space.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/array.h"
+#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
+#include "os.h"
+#include "output_stream.h"
+#include "safe_map.h"
+#include "scoped_thread_state_change.h"
+#include "verifier/method_verifier.h"
+
+namespace art {
+
+bool OatWriter::Create(OutputStream& output_stream,
+                       const std::vector<const DexFile*>& dex_files,
+                       uint32_t image_file_location_oat_checksum,
+                       uint32_t image_file_location_oat_begin,
+                       const std::string& image_file_location,
+                       const CompilerDriver& driver) {
+  OatWriter oat_writer(dex_files,
+                       image_file_location_oat_checksum,
+                       image_file_location_oat_begin,
+                       image_file_location,
+                       &driver);
+  return oat_writer.Write(output_stream);
+}
+
+OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
+                     uint32_t image_file_location_oat_checksum,
+                     uint32_t image_file_location_oat_begin,
+                     const std::string& image_file_location,
+                     const CompilerDriver* compiler)
+  : compiler_driver_(compiler),
+    dex_files_(&dex_files),
+    image_file_location_oat_checksum_(image_file_location_oat_checksum),
+    image_file_location_oat_begin_(image_file_location_oat_begin),
+    image_file_location_(image_file_location),
+    oat_header_(NULL),
+    size_dex_file_alignment_(0),
+    size_executable_offset_alignment_(0),
+    size_oat_header_(0),
+    size_oat_header_image_file_location_(0),
+    size_dex_file_(0),
+    size_interpreter_to_interpreter_entry_(0),
+    size_interpreter_to_quick_entry_(0),
+    size_portable_resolution_trampoline_(0),
+    size_quick_resolution_trampoline_(0),
+    size_stubs_alignment_(0),
+    size_code_size_(0),
+    size_code_(0),
+    size_code_alignment_(0),
+    size_mapping_table_(0),
+    size_vmap_table_(0),
+    size_gc_map_(0),
+    size_oat_dex_file_location_size_(0),
+    size_oat_dex_file_location_data_(0),
+    size_oat_dex_file_location_checksum_(0),
+    size_oat_dex_file_offset_(0),
+    size_oat_dex_file_methods_offsets_(0),
+    size_oat_class_status_(0),
+    size_oat_class_method_offsets_(0) {
+
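+  // Compute the file layout: OatHeader, OatDexFile headers, raw dex files, OatClass tables,
+  // and finally the executable code region.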
+  size_t offset = InitOatHeader();
+  offset = InitOatDexFiles(offset);
+  offset = InitDexFiles(offset);
+  offset = InitOatClasses(offset);
+  offset = InitOatCode(offset);
+  offset = InitOatCodeDexFiles(offset);
+
+  CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
+  CHECK(image_file_location.empty() == compiler->IsImage());
+}
+
+OatWriter::~OatWriter() {
+  delete oat_header_;
+  STLDeleteElements(&oat_dex_files_);
+  STLDeleteElements(&oat_classes_);
+}
+
+size_t OatWriter::InitOatHeader() {
+  // create the OatHeader
+  oat_header_ = new OatHeader(compiler_driver_->GetInstructionSet(),
+                              dex_files_,
+                              image_file_location_oat_checksum_,
+                              image_file_location_oat_begin_,
+                              image_file_location_);
+  size_t offset = sizeof(*oat_header_);
+  offset += image_file_location_.size();
+  return offset;
+}
+
+size_t OatWriter::InitOatDexFiles(size_t offset) {
+  // create the OatDexFiles
+  for (size_t i = 0; i != dex_files_->size(); ++i) {
+    const DexFile* dex_file = (*dex_files_)[i];
+    CHECK(dex_file != NULL);
+    OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
+    oat_dex_files_.push_back(oat_dex_file);
+    offset += oat_dex_file->SizeOf();
+  }
+  return offset;
+}
+
+size_t OatWriter::InitDexFiles(size_t offset) {
+  // calculate the offsets within OatDexFiles to the DexFiles
+  for (size_t i = 0; i != dex_files_->size(); ++i) {
+    // dex files are required to be 4 byte aligned
+    size_t original_offset = offset;
+    offset = RoundUp(offset, 4);
+    size_dex_file_alignment_ += offset - original_offset;
+
+    // set offset in OatDexFile to DexFile
+    oat_dex_files_[i]->dex_file_offset_ = offset;
+
+    const DexFile* dex_file = (*dex_files_)[i];
+    offset += dex_file->GetHeader().file_size_;
+  }
+  return offset;
+}
+
+size_t OatWriter::InitOatClasses(size_t offset) {
+  // create the OatClasses
+  // calculate the offsets within OatDexFiles to OatClasses
+  for (size_t i = 0; i != dex_files_->size(); ++i) {
+    const DexFile* dex_file = (*dex_files_)[i];
+    for (size_t class_def_index = 0;
+         class_def_index < dex_file->NumClassDefs();
+         class_def_index++) {
+      oat_dex_files_[i]->methods_offsets_[class_def_index] = offset;
+      const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+      const byte* class_data = dex_file->GetClassData(class_def);
+      uint32_t num_methods = 0;
+      if (class_data != NULL) {  // i.e. not an empty class, such as a marker interface
+        ClassDataItemIterator it(*dex_file, class_data);
+        size_t num_direct_methods = it.NumDirectMethods();
+        size_t num_virtual_methods = it.NumVirtualMethods();
+        num_methods = num_direct_methods + num_virtual_methods;
+      }
+
+      ClassReference class_ref(dex_file, class_def_index);
+      CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
+      mirror::Class::Status status;
+      if (compiled_class != NULL) {
+        status = compiled_class->GetStatus();
+      } else if (verifier::MethodVerifier::IsClassRejected(class_ref)) {
+        status = mirror::Class::kStatusError;
+      } else {
+        status = mirror::Class::kStatusNotReady;
+      }
+
+      OatClass* oat_class = new OatClass(offset, status, num_methods);
+      oat_classes_.push_back(oat_class);
+      offset += oat_class->SizeOf();
+    }
+    oat_dex_files_[i]->UpdateChecksum(*oat_header_);
+  }
+  return offset;
+}
+
+size_t OatWriter::InitOatCode(size_t offset) {
+  // calculate the offsets within OatHeader to executable code
+  size_t old_offset = offset;
+  // required to be on a new page boundary
+  offset = RoundUp(offset, kPageSize);
+  oat_header_->SetExecutableOffset(offset);
+  size_executable_offset_alignment_ = offset - old_offset;
+  if (compiler_driver_->IsImage()) {
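+    // Interpreter entry stubs and resolution trampolines are only emitted when compiling an
+    // image; otherwise their offsets are left at zero (see the else branch below).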
+    InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
+    oat_header_->SetInterpreterToInterpreterEntryOffset(offset);
+    interpreter_to_interpreter_entry_.reset(compiler_driver_->CreateInterpreterToInterpreterEntry());
+    offset += interpreter_to_interpreter_entry_->size();
+
+    offset = CompiledCode::AlignCode(offset, instruction_set);
+    oat_header_->SetInterpreterToQuickEntryOffset(offset);
+    interpreter_to_quick_entry_.reset(compiler_driver_->CreateInterpreterToQuickEntry());
+    offset += interpreter_to_quick_entry_->size();
+
+    offset = CompiledCode::AlignCode(offset, instruction_set);
+    oat_header_->SetPortableResolutionTrampolineOffset(offset);
+    portable_resolution_trampoline_.reset(compiler_driver_->CreatePortableResolutionTrampoline());
+    offset += portable_resolution_trampoline_->size();
+
+    offset = CompiledCode::AlignCode(offset, instruction_set);
+    oat_header_->SetQuickResolutionTrampolineOffset(offset);
+    quick_resolution_trampoline_.reset(compiler_driver_->CreateQuickResolutionTrampoline());
+    offset += quick_resolution_trampoline_->size();
+  } else {
+    oat_header_->SetInterpreterToInterpreterEntryOffset(0);
+    oat_header_->SetInterpreterToQuickEntryOffset(0);
+    oat_header_->SetPortableResolutionTrampolineOffset(0);
+    oat_header_->SetQuickResolutionTrampolineOffset(0);
+  }
+  return offset;
+}
+
+size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
+  size_t oat_class_index = 0;
+  for (size_t i = 0; i != dex_files_->size(); ++i) {
+    const DexFile* dex_file = (*dex_files_)[i];
+    CHECK(dex_file != NULL);
+    offset = InitOatCodeDexFile(offset, oat_class_index, *dex_file);
+  }
+  return offset;
+}
+
+size_t OatWriter::InitOatCodeDexFile(size_t offset,
+                                     size_t& oat_class_index,
+                                     const DexFile& dex_file) {
+  for (size_t class_def_index = 0;
+       class_def_index < dex_file.NumClassDefs();
+       class_def_index++, oat_class_index++) {
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    offset = InitOatCodeClassDef(offset, oat_class_index, class_def_index, dex_file, class_def);
+    oat_classes_[oat_class_index]->UpdateChecksum(*oat_header_);
+  }
+  return offset;
+}
+
+size_t OatWriter::InitOatCodeClassDef(size_t offset,
+                                      size_t oat_class_index, size_t class_def_index,
+                                      const DexFile& dex_file,
+                                      const DexFile::ClassDef& class_def) {
+  const byte* class_data = dex_file.GetClassData(class_def);
+  if (class_data == NULL) {
+    // empty class, such as a marker interface
+    return offset;
+  }
+  ClassDataItemIterator it(dex_file, class_data);
+  CHECK_EQ(oat_classes_[oat_class_index]->method_offsets_.size(),
+           it.NumDirectMethods() + it.NumVirtualMethods());
+  // Skip fields
+  while (it.HasNextStaticField()) {
+    it.Next();
+  }
+  while (it.HasNextInstanceField()) {
+    it.Next();
+  }
+  // Process methods
+  size_t class_def_method_index = 0;
+  while (it.HasNextDirectMethod()) {
+    bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
+    offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
+                               is_native, it.GetMethodInvokeType(class_def), it.GetMemberIndex(),
+                               &dex_file);
+    class_def_method_index++;
+    it.Next();
+  }
+  while (it.HasNextVirtualMethod()) {
+    bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
+    offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
+                               is_native, it.GetMethodInvokeType(class_def), it.GetMemberIndex(),
+                               &dex_file);
+    class_def_method_index++;
+    it.Next();
+  }
+  DCHECK(!it.HasNext());
+  return offset;
+}
+
+size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
+                                    size_t __attribute__((unused)) class_def_index,
+                                    size_t class_def_method_index,
+                                    bool __attribute__((unused)) is_native,
+                                    InvokeType invoke_type,
+                                    uint32_t method_idx, const DexFile* dex_file) {
+  // derived from CompiledMethod if available
+  uint32_t code_offset = 0;
+  uint32_t frame_size_in_bytes = kStackAlignment;
+  uint32_t core_spill_mask = 0;
+  uint32_t fp_spill_mask = 0;
+  uint32_t mapping_table_offset = 0;
+  uint32_t vmap_table_offset = 0;
+  uint32_t gc_map_offset = 0;
+
+  OatClass* oat_class = oat_classes_[oat_class_index];
+#if defined(ART_USE_PORTABLE_COMPILER)
+  size_t oat_method_offsets_offset =
+      oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
+#endif
+
+  CompiledMethod* compiled_method =
+      compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+  if (compiled_method != NULL) {
+#if defined(ART_USE_PORTABLE_COMPILER)
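+    // Portable code lives in ELF .o files; record where this method's code_offset_ field sits in
+    // the oat file so it can later be patched to point at that code.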
+    compiled_method->AddOatdataOffsetToCompliledCodeOffset(
+        oat_method_offsets_offset + OFFSETOF_MEMBER(OatMethodOffsets, code_offset_));
+#else
+    const std::vector<uint8_t>& code = compiled_method->GetCode();
+    offset = compiled_method->AlignCode(offset);
+    DCHECK_ALIGNED(offset, kArmAlignment);
+    uint32_t code_size = code.size() * sizeof(code[0]);
+    CHECK_NE(code_size, 0U);
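+    // The recorded code offset skips the size word prepended to the code and includes the
+    // instruction-set code delta (e.g. the Thumb bit).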
+    uint32_t thumb_offset = compiled_method->CodeDelta();
+    code_offset = offset + sizeof(code_size) + thumb_offset;
+
+    // Deduplicate code arrays
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter = code_offsets_.find(&code);
+    if (code_iter != code_offsets_.end()) {
+      code_offset = code_iter->second;
+    } else {
+      code_offsets_.Put(&code, code_offset);
+      offset += sizeof(code_size);  // code size is prepended before code
+      offset += code_size;
+      oat_header_->UpdateChecksum(&code[0], code_size);
+    }
+#endif
+    frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+    core_spill_mask = compiled_method->GetCoreSpillMask();
+    fp_spill_mask = compiled_method->GetFpSpillMask();
+
+    const std::vector<uint32_t>& mapping_table = compiled_method->GetMappingTable();
+    size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
+    mapping_table_offset = (mapping_table_size == 0) ? 0 : offset;
+
+    // Deduplicate mapping tables
+    SafeMap<const std::vector<uint32_t>*, uint32_t>::iterator mapping_iter = mapping_table_offsets_.find(&mapping_table);
+    if (mapping_iter != mapping_table_offsets_.end()) {
+      mapping_table_offset = mapping_iter->second;
+    } else {
+      mapping_table_offsets_.Put(&mapping_table, mapping_table_offset);
+      offset += mapping_table_size;
+      oat_header_->UpdateChecksum(&mapping_table[0], mapping_table_size);
+    }
+
+    const std::vector<uint16_t>& vmap_table = compiled_method->GetVmapTable();
+    size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
+    vmap_table_offset = (vmap_table_size == 0) ? 0 : offset;
+
+    // Deduplicate vmap tables
+    SafeMap<const std::vector<uint16_t>*, uint32_t>::iterator vmap_iter = vmap_table_offsets_.find(&vmap_table);
+    if (vmap_iter != vmap_table_offsets_.end()) {
+      vmap_table_offset = vmap_iter->second;
+    } else {
+      vmap_table_offsets_.Put(&vmap_table, vmap_table_offset);
+      offset += vmap_table_size;
+      oat_header_->UpdateChecksum(&vmap_table[0], vmap_table_size);
+    }
+
+    const std::vector<uint8_t>& gc_map = compiled_method->GetGcMap();
+    size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
+    gc_map_offset = (gc_map_size == 0) ? 0 : offset;
+
+#if !defined(NDEBUG)
+    // We expect GC maps except when the class hasn't been verified or the method is native
+    ClassReference class_ref(dex_file, class_def_index);
+    CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
+    mirror::Class::Status status;
+    if (compiled_class != NULL) {
+      status = compiled_class->GetStatus();
+    } else if (verifier::MethodVerifier::IsClassRejected(class_ref)) {
+      status = mirror::Class::kStatusError;
+    } else {
+      status = mirror::Class::kStatusNotReady;
+    }
+    CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
+        << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
+        << (status < mirror::Class::kStatusVerified) << " " << status << " "
+        << PrettyMethod(method_idx, *dex_file);
+#endif
+
+    // Deduplicate GC maps
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator gc_map_iter = gc_map_offsets_.find(&gc_map);
+    if (gc_map_iter != gc_map_offsets_.end()) {
+      gc_map_offset = gc_map_iter->second;
+    } else {
+      gc_map_offsets_.Put(&gc_map, gc_map_offset);
+      offset += gc_map_size;
+      oat_header_->UpdateChecksum(&gc_map[0], gc_map_size);
+    }
+  }
+
+  oat_class->method_offsets_[class_def_method_index]
+      = OatMethodOffsets(code_offset,
+                         frame_size_in_bytes,
+                         core_spill_mask,
+                         fp_spill_mask,
+                         mapping_table_offset,
+                         vmap_table_offset,
+                         gc_map_offset
+                         );
+
+  if (compiler_driver_->IsImage()) {
+    ClassLinker* linker = Runtime::Current()->GetClassLinker();
+    mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
+    // Unchecked as we hold mutator_lock_ on entry.
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    mirror::AbstractMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache,
+                                                           NULL, NULL, invoke_type);
+    CHECK(method != NULL);
+    method->SetFrameSizeInBytes(frame_size_in_bytes);
+    method->SetCoreSpillMask(core_spill_mask);
+    method->SetFpSpillMask(fp_spill_mask);
+    method->SetOatMappingTableOffset(mapping_table_offset);
+    // Don't overwrite static method trampoline
+    if (!method->IsStatic() || method->IsConstructor() ||
+        method->GetDeclaringClass()->IsInitialized()) {
+      method->SetOatCodeOffset(code_offset);
+    } else {
+      method->SetEntryPointFromCompiledCode(NULL);
+    }
+    method->SetOatVmapTableOffset(vmap_table_offset);
+    method->SetOatNativeGcMapOffset(gc_map_offset);
+  }
+
+  return offset;
+}
+
+#define DCHECK_OFFSET() \
+  DCHECK_EQ(static_cast<off_t>(offset), out.Seek(0, kSeekCurrent))
+
+#define DCHECK_OFFSET_() \
+  DCHECK_EQ(static_cast<off_t>(offset_), out.Seek(0, kSeekCurrent))
+
+bool OatWriter::Write(OutputStream& out) {
+  if (!out.WriteFully(oat_header_, sizeof(*oat_header_))) {
+    PLOG(ERROR) << "Failed to write oat header to " << out.GetLocation();
+    return false;
+  }
+  size_oat_header_ += sizeof(*oat_header_);
+
+  if (!out.WriteFully(image_file_location_.data(), image_file_location_.size())) {
+    PLOG(ERROR) << "Failed to write oat header image file location to " << out.GetLocation();
+    return false;
+  }
+  size_oat_header_image_file_location_ += image_file_location_.size();
+
+  if (!WriteTables(out)) {
+    LOG(ERROR) << "Failed to write oat tables to " << out.GetLocation();
+    return false;
+  }
+
+  size_t code_offset = WriteCode(out);
+  if (code_offset == 0) {
+    LOG(ERROR) << "Failed to write oat code to " << out.GetLocation();
+    return false;
+  }
+
+  code_offset = WriteCodeDexFiles(out, code_offset);
+  if (code_offset == 0) {
+    LOG(ERROR) << "Failed to write oat code for dex files to " << out.GetLocation();
+    return false;
+  }
+
+  if (kIsDebugBuild) {
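+    // Log every size_* statistic and verify that together they sum to the current file position.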
+    uint32_t size_total = 0;
+    #define DO_STAT(x) \
+      LOG(INFO) << #x "=" << PrettySize(x) << " (" << x << "B)"; \
+      size_total += x;
+
+    DO_STAT(size_dex_file_alignment_);
+    DO_STAT(size_executable_offset_alignment_);
+    DO_STAT(size_oat_header_);
+    DO_STAT(size_oat_header_image_file_location_);
+    DO_STAT(size_dex_file_);
+    DO_STAT(size_interpreter_to_interpreter_entry_);
+    DO_STAT(size_interpreter_to_quick_entry_);
+    DO_STAT(size_portable_resolution_trampoline_);
+    DO_STAT(size_quick_resolution_trampoline_);
+    DO_STAT(size_stubs_alignment_);
+    DO_STAT(size_code_size_);
+    DO_STAT(size_code_);
+    DO_STAT(size_code_alignment_);
+    DO_STAT(size_mapping_table_);
+    DO_STAT(size_vmap_table_);
+    DO_STAT(size_gc_map_);
+    DO_STAT(size_oat_dex_file_location_size_);
+    DO_STAT(size_oat_dex_file_location_data_);
+    DO_STAT(size_oat_dex_file_location_checksum_);
+    DO_STAT(size_oat_dex_file_offset_);
+    DO_STAT(size_oat_dex_file_methods_offsets_);
+    DO_STAT(size_oat_class_status_);
+    DO_STAT(size_oat_class_method_offsets_);
+    #undef DO_STAT
+
+    LOG(INFO) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)";
+    CHECK_EQ(size_total, static_cast<uint32_t>(out.Seek(0, kSeekCurrent)));
+  }
+
+  return true;
+}
+
+bool OatWriter::WriteTables(OutputStream& out) {
+  for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
+    if (!oat_dex_files_[i]->Write(this, out)) {
+      PLOG(ERROR) << "Failed to write oat dex information to " << out.GetLocation();
+      return false;
+    }
+  }
+  for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
+    uint32_t expected_offset = oat_dex_files_[i]->dex_file_offset_;
+    off_t actual_offset = out.Seek(expected_offset, kSeekSet);
+    if (static_cast<uint32_t>(actual_offset) != expected_offset) {
+      const DexFile* dex_file = (*dex_files_)[i];
+      PLOG(ERROR) << "Failed to seek to dex file section. Actual: " << actual_offset
+                  << " Expected: " << expected_offset << " File: " << dex_file->GetLocation();
+      return false;
+    }
+    const DexFile* dex_file = (*dex_files_)[i];
+    if (!out.WriteFully(&dex_file->GetHeader(), dex_file->GetHeader().file_size_)) {
+      PLOG(ERROR) << "Failed to write dex file " << dex_file->GetLocation() << " to " << out.GetLocation();
+      return false;
+    }
+    size_dex_file_ += dex_file->GetHeader().file_size_;
+  }
+  for (size_t i = 0; i != oat_classes_.size(); ++i) {
+    if (!oat_classes_[i]->Write(this, out)) {
+      PLOG(ERROR) << "Failed to write oat methods information to " << out.GetLocation();
+      return false;
+    }
+  }
+  return true;
+}
+
+size_t OatWriter::WriteCode(OutputStream& out) {
+  uint32_t offset = oat_header_->GetExecutableOffset();
+  off_t new_offset = out.Seek(size_executable_offset_alignment_, kSeekCurrent);
+  if (static_cast<uint32_t>(new_offset) != offset) {
+    PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
+                << " Expected: " << offset << " File: " << out.GetLocation();
+    return 0;
+  }
+  DCHECK_OFFSET();
+  if (compiler_driver_->IsImage()) {
+    InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
+    if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0], interpreter_to_interpreter_entry_->size())) {
+      PLOG(ERROR) << "Failed to write interpreter to interpreter entry to " << out.GetLocation();
+      return 0;
+    }
+    size_interpreter_to_interpreter_entry_ += interpreter_to_interpreter_entry_->size();
+    offset += interpreter_to_interpreter_entry_->size();
+    DCHECK_OFFSET();
+
+    uint32_t aligned_offset = CompiledCode::AlignCode(offset, instruction_set);
+    uint32_t alignment_padding = aligned_offset - offset;
+    out.Seek(alignment_padding, kSeekCurrent);
+    size_stubs_alignment_ += alignment_padding;
+    if (!out.WriteFully(&(*interpreter_to_quick_entry_)[0], interpreter_to_quick_entry_->size())) {
+      PLOG(ERROR) << "Failed to write interpreter to quick entry to " << out.GetLocation();
+      return 0;
+    }
+    size_interpreter_to_quick_entry_ += interpreter_to_quick_entry_->size();
+    offset += alignment_padding + interpreter_to_quick_entry_->size();
+    DCHECK_OFFSET();
+
+    aligned_offset = CompiledCode::AlignCode(offset, instruction_set);
+    alignment_padding = aligned_offset - offset;
+    out.Seek(alignment_padding, kSeekCurrent);
+    size_stubs_alignment_ += alignment_padding;
+    if (!out.WriteFully(&(*portable_resolution_trampoline_)[0], portable_resolution_trampoline_->size())) {
+      PLOG(ERROR) << "Failed to write portable resolution trampoline to " << out.GetLocation();
+      return 0;
+    }
+    size_portable_resolution_trampoline_ += portable_resolution_trampoline_->size();
+    offset += alignment_padding + portable_resolution_trampoline_->size();
+    DCHECK_OFFSET();
+
+    aligned_offset = CompiledCode::AlignCode(offset, instruction_set);
+    alignment_padding = aligned_offset - offset;
+    out.Seek(alignment_padding, kSeekCurrent);
+    size_stubs_alignment_ += alignment_padding;
+    if (!out.WriteFully(&(*quick_resolution_trampoline_)[0], quick_resolution_trampoline_->size())) {
+      PLOG(ERROR) << "Failed to write quick resolution trampoline to " << out.GetLocation();
+      return 0;
+    }
+    size_quick_resolution_trampoline_ += quick_resolution_trampoline_->size();
+    offset += alignment_padding + quick_resolution_trampoline_->size();
+    DCHECK_OFFSET();
+  }
+  return offset;
+}
+
+size_t OatWriter::WriteCodeDexFiles(OutputStream& out, size_t code_offset) {
+  size_t oat_class_index = 0;
+  for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
+    const DexFile* dex_file = (*dex_files_)[i];
+    CHECK(dex_file != NULL);
+    code_offset = WriteCodeDexFile(out, code_offset, oat_class_index, *dex_file);
+    if (code_offset == 0) {
+      return 0;
+    }
+  }
+  return code_offset;
+}
+
+size_t OatWriter::WriteCodeDexFile(OutputStream& out, size_t code_offset, size_t& oat_class_index,
+                                   const DexFile& dex_file) {
+  for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs();
+      class_def_index++, oat_class_index++) {
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    code_offset = WriteCodeClassDef(out, code_offset, oat_class_index, dex_file, class_def);
+    if (code_offset == 0) {
+      return 0;
+    }
+  }
+  return code_offset;
+}
+
+void OatWriter::ReportWriteFailure(const char* what, uint32_t method_idx,
+                                   const DexFile& dex_file, OutputStream& out) const {
+  PLOG(ERROR) << "Failed to write " << what << " for " << PrettyMethod(method_idx, dex_file)
+      << " to " << out.GetLocation();
+}
+
+size_t OatWriter::WriteCodeClassDef(OutputStream& out,
+                                    size_t code_offset, size_t oat_class_index,
+                                    const DexFile& dex_file,
+                                    const DexFile::ClassDef& class_def) {
+  const byte* class_data = dex_file.GetClassData(class_def);
+  if (class_data == NULL) {
+    // i.e. an empty class, such as a marker interface
+    return code_offset;
+  }
+  ClassDataItemIterator it(dex_file, class_data);
+  // Skip fields
+  while (it.HasNextStaticField()) {
+    it.Next();
+  }
+  while (it.HasNextInstanceField()) {
+    it.Next();
+  }
+  // Process methods
+  size_t class_def_method_index = 0;
+  while (it.HasNextDirectMethod()) {
+    bool is_static = (it.GetMemberAccessFlags() & kAccStatic) != 0;
+    code_offset = WriteCodeMethod(out, code_offset, oat_class_index, class_def_method_index,
+                                  is_static, it.GetMemberIndex(), dex_file);
+    if (code_offset == 0) {
+      return 0;
+    }
+    class_def_method_index++;
+    it.Next();
+  }
+  while (it.HasNextVirtualMethod()) {
+    code_offset = WriteCodeMethod(out, code_offset, oat_class_index, class_def_method_index,
+                                  false, it.GetMemberIndex(), dex_file);
+    if (code_offset == 0) {
+      return 0;
+    }
+    class_def_method_index++;
+    it.Next();
+  }
+  return code_offset;
+}
+
+size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_class_index,
+                                  size_t class_def_method_index, bool is_static,
+                                  uint32_t method_idx, const DexFile& dex_file) {
+  const CompiledMethod* compiled_method =
+      compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
+
+  OatMethodOffsets method_offsets =
+      oat_classes_[oat_class_index]->method_offsets_[class_def_method_index];
+
+  if (compiled_method != NULL) {  // i.e. not an abstract method
+#if !defined(ART_USE_PORTABLE_COMPILER)
+    uint32_t aligned_offset = compiled_method->AlignCode(offset);
+    uint32_t aligned_code_delta = aligned_offset - offset;
+    if (aligned_code_delta != 0) {
+      off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent);
+      size_code_alignment_ += aligned_code_delta;
+      if (static_cast<uint32_t>(new_offset) != aligned_offset) {
+        PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
+                    << " Expected: " << aligned_offset << " File: " << out.GetLocation();
+        return 0;
+      }
+      offset += aligned_code_delta;
+      DCHECK_OFFSET();
+    }
+    DCHECK_ALIGNED(offset, kArmAlignment);
+    const std::vector<uint8_t>& code = compiled_method->GetCode();
+    uint32_t code_size = code.size() * sizeof(code[0]);
+    CHECK_NE(code_size, 0U);
+
+    // Deduplicate code arrays
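+    // If identical code was already written for an earlier method, the offset recorded in
+    // InitOatCodeMethod points at that copy and nothing is re-emitted here.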
+    size_t code_offset = offset + sizeof(code_size) + compiled_method->CodeDelta();
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter = code_offsets_.find(&code);
+    if (code_iter != code_offsets_.end() && code_offset != method_offsets.code_offset_) {
+      DCHECK(code_iter->second == method_offsets.code_offset_)
+          << PrettyMethod(method_idx, dex_file);
+    } else {
+      DCHECK(code_offset == method_offsets.code_offset_) << PrettyMethod(method_idx, dex_file);
+      if (!out.WriteFully(&code_size, sizeof(code_size))) {
+        ReportWriteFailure("method code size", method_idx, dex_file, out);
+        return 0;
+      }
+      size_code_size_ += sizeof(code_size);
+      offset += sizeof(code_size);
+      DCHECK_OFFSET();
+      if (!out.WriteFully(&code[0], code_size)) {
+        ReportWriteFailure("method code", method_idx, dex_file, out);
+        return 0;
+      }
+      size_code_ += code_size;
+      offset += code_size;
+    }
+    DCHECK_OFFSET();
+#endif
+
+    const std::vector<uint32_t>& mapping_table = compiled_method->GetMappingTable();
+    size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
+
+    // Deduplicate mapping tables
+    SafeMap<const std::vector<uint32_t>*, uint32_t>::iterator mapping_iter =
+        mapping_table_offsets_.find(&mapping_table);
+    if (mapping_iter != mapping_table_offsets_.end() &&
+        offset != method_offsets.mapping_table_offset_) {
+      DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0)
+          || mapping_iter->second == method_offsets.mapping_table_offset_)
+          << PrettyMethod(method_idx, dex_file);
+    } else {
+      DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0)
+          || offset == method_offsets.mapping_table_offset_)
+          << PrettyMethod(method_idx, dex_file);
+      if (!out.WriteFully(&mapping_table[0], mapping_table_size)) {
+        ReportWriteFailure("mapping table", method_idx, dex_file, out);
+        return 0;
+      }
+      size_mapping_table_ += mapping_table_size;
+      offset += mapping_table_size;
+    }
+    DCHECK_OFFSET();
+
+    const std::vector<uint16_t>& vmap_table = compiled_method->GetVmapTable();
+    size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
+
+    // Deduplicate vmap tables
+    SafeMap<const std::vector<uint16_t>*, uint32_t>::iterator vmap_iter =
+        vmap_table_offsets_.find(&vmap_table);
+    if (vmap_iter != vmap_table_offsets_.end() &&
+        offset != method_offsets.vmap_table_offset_) {
+      DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0)
+          || vmap_iter->second == method_offsets.vmap_table_offset_)
+          << PrettyMethod(method_idx, dex_file);
+    } else {
+      DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0)
+          || offset == method_offsets.vmap_table_offset_)
+          << PrettyMethod(method_idx, dex_file);
+      if (!out.WriteFully(&vmap_table[0], vmap_table_size)) {
+        ReportWriteFailure("vmap table", method_idx, dex_file, out);
+        return 0;
+      }
+      size_vmap_table_ += vmap_table_size;
+      offset += vmap_table_size;
+    }
+    DCHECK_OFFSET();
+
+    const std::vector<uint8_t>& gc_map = compiled_method->GetGcMap();
+    size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
+
+    // Deduplicate GC maps
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator gc_map_iter =
+        gc_map_offsets_.find(&gc_map);
+    if (gc_map_iter != gc_map_offsets_.end() &&
+        offset != method_offsets.gc_map_offset_) {
+      DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0)
+          || gc_map_iter->second == method_offsets.gc_map_offset_)
+          << PrettyMethod(method_idx, dex_file);
+    } else {
+      DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0)
+          || offset == method_offsets.gc_map_offset_)
+          << PrettyMethod(method_idx, dex_file);
+      if (!out.WriteFully(&gc_map[0], gc_map_size)) {
+        ReportWriteFailure("GC map", method_idx, dex_file, out);
+        return 0;
+      }
+      size_gc_map_ += gc_map_size;
+      offset += gc_map_size;
+    }
+    DCHECK_OFFSET();
+  }
+
+  return offset;
+}
+
+OatWriter::OatDexFile::OatDexFile(size_t offset, const DexFile& dex_file) {
+  offset_ = offset;
+  const std::string& location(dex_file.GetLocation());
+  dex_file_location_size_ = location.size();
+  dex_file_location_data_ = reinterpret_cast<const uint8_t*>(location.data());
+  dex_file_location_checksum_ = dex_file.GetLocationChecksum();
+  dex_file_offset_ = 0;
+  methods_offsets_.resize(dex_file.NumClassDefs());
+}
+
+size_t OatWriter::OatDexFile::SizeOf() const {
+  return sizeof(dex_file_location_size_)
+          + dex_file_location_size_
+          + sizeof(dex_file_location_checksum_)
+          + sizeof(dex_file_offset_)
+          + (sizeof(methods_offsets_[0]) * methods_offsets_.size());
+}
+
+void OatWriter::OatDexFile::UpdateChecksum(OatHeader& oat_header) const {
+  oat_header.UpdateChecksum(&dex_file_location_size_, sizeof(dex_file_location_size_));
+  oat_header.UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
+  oat_header.UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
+  oat_header.UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
+  oat_header.UpdateChecksum(&methods_offsets_[0],
+                            sizeof(methods_offsets_[0]) * methods_offsets_.size());
+}
+
+bool OatWriter::OatDexFile::Write(OatWriter* oat_writer, OutputStream& out) const {
+  DCHECK_OFFSET_();
+  if (!out.WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) {
+    PLOG(ERROR) << "Failed to write dex file location length to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_dex_file_location_size_ += sizeof(dex_file_location_size_);
+  if (!out.WriteFully(dex_file_location_data_, dex_file_location_size_)) {
+    PLOG(ERROR) << "Failed to write dex file location data to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_dex_file_location_data_ += dex_file_location_size_;
+  if (!out.WriteFully(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_))) {
+    PLOG(ERROR) << "Failed to write dex file location checksum to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_dex_file_location_checksum_ += sizeof(dex_file_location_checksum_);
+  if (!out.WriteFully(&dex_file_offset_, sizeof(dex_file_offset_))) {
+    PLOG(ERROR) << "Failed to write dex file offset to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_dex_file_offset_ += sizeof(dex_file_offset_);
+  if (!out.WriteFully(&methods_offsets_[0],
+                      sizeof(methods_offsets_[0]) * methods_offsets_.size())) {
+    PLOG(ERROR) << "Failed to write methods offsets to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_dex_file_methods_offsets_ +=
+      sizeof(methods_offsets_[0]) * methods_offsets_.size();
+  return true;
+}
+
+OatWriter::OatClass::OatClass(size_t offset, mirror::Class::Status status, uint32_t methods_count) {
+  offset_ = offset;
+  status_ = status;
+  method_offsets_.resize(methods_count);
+}
+
+size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatHeader(
+    size_t class_def_method_index_) const {
+  return offset_ + GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
+}
+
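+// An OatClass is laid out as its status word followed by the OatMethodOffsets array, so entry i
+// starts at sizeof(status_) + i * sizeof(OatMethodOffsets) within the OatClass.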
+size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatClass(
+    size_t class_def_method_index_) const {
+  return sizeof(status_)
+          + (sizeof(method_offsets_[0]) * class_def_method_index_);
+}
+
+size_t OatWriter::OatClass::SizeOf() const {
+  return GetOatMethodOffsetsOffsetFromOatClass(method_offsets_.size());
+}
+
+void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const {
+  oat_header.UpdateChecksum(&status_, sizeof(status_));
+  oat_header.UpdateChecksum(&method_offsets_[0],
+                            sizeof(method_offsets_[0]) * method_offsets_.size());
+}
+
+bool OatWriter::OatClass::Write(OatWriter* oat_writer, OutputStream& out) const {
+  DCHECK_OFFSET_();
+  if (!out.WriteFully(&status_, sizeof(status_))) {
+    PLOG(ERROR) << "Failed to write class status to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_class_status_ += sizeof(status_);
+  DCHECK_EQ(static_cast<off_t>(GetOatMethodOffsetsOffsetFromOatHeader(0)),
+            out.Seek(0, kSeekCurrent));
+  if (!out.WriteFully(&method_offsets_[0],
+                      sizeof(method_offsets_[0]) * method_offsets_.size())) {
+    PLOG(ERROR) << "Failed to write method offsets to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size();
+  DCHECK_EQ(static_cast<off_t>(GetOatMethodOffsetsOffsetFromOatHeader(method_offsets_.size())),
+            out.Seek(0, kSeekCurrent));
+  return true;
+}
+
+}  // namespace art
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
new file mode 100644
index 0000000..1f97bf8
--- /dev/null
+++ b/compiler/oat_writer.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_WRITER_H_
+#define ART_SRC_OAT_WRITER_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+
+#include "driver/compiler_driver.h"
+#include "mem_map.h"
+#include "oat.h"
+#include "mirror/class.h"
+#include "safe_map.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+class OutputStream;
+
+// OatHeader         variable length with count of D OatDexFiles
+//
+// OatDexFile[0]     one variable sized OatDexFile with offsets to Dex and OatClasses
+// OatDexFile[1]
+// ...
+// OatDexFile[D]
+//
+// Dex[0]            one variable sized DexFile for each OatDexFile.
+// Dex[1]            these are literal copies of the input .dex files.
+// ...
+// Dex[D]
+//
+// OatClass[0]       one variable sized OatClass for each of C DexFile::ClassDefs
+// OatClass[1]       contains OatClass entries with class status, offsets to code, etc.
+// ...
+// OatClass[C]
+//
+// padding           if necessary so that the following code will be page aligned
+//
+// CompiledMethod    one variable sized blob with the contents of each CompiledMethod
+// CompiledMethod
+// CompiledMethod
+// CompiledMethod
+// CompiledMethod
+// CompiledMethod
+// ...
+// CompiledMethod
+//
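+//
+// All offsets stored in these structures are relative to the beginning of the OatHeader, which is
+// the first thing written to the output stream (see OatWriter::Write).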
+class OatWriter {
+ public:
+  // Write an oat file. Returns true on success, false on failure.
+  static bool Create(OutputStream& out,
+                     const std::vector<const DexFile*>& dex_files,
+                     uint32_t image_file_location_oat_checksum,
+                     uint32_t image_file_location_oat_begin,
+                     const std::string& image_file_location,
+                     const CompilerDriver& compiler)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+  OatWriter(const std::vector<const DexFile*>& dex_files,
+            uint32_t image_file_location_oat_checksum,
+            uint32_t image_file_location_oat_begin,
+            const std::string& image_file_location,
+            const CompilerDriver* compiler) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ~OatWriter();
+
+  size_t InitOatHeader();
+  size_t InitOatDexFiles(size_t offset);
+  size_t InitDexFiles(size_t offset);
+  size_t InitOatClasses(size_t offset);
+  size_t InitOatCode(size_t offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t InitOatCodeDexFiles(size_t offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t InitOatCodeDexFile(size_t offset,
+                            size_t& oat_class_index,
+                            const DexFile& dex_file)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t InitOatCodeClassDef(size_t offset,
+                             size_t oat_class_index, size_t class_def_index,
+                             const DexFile& dex_file,
+                             const DexFile::ClassDef& class_def)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t class_def_index,
+                           size_t class_def_method_index, bool is_native, InvokeType type,
+                           uint32_t method_idx, const DexFile*)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool Write(OutputStream& out);
+  bool WriteTables(OutputStream& out);
+  size_t WriteCode(OutputStream& out);
+  size_t WriteCodeDexFiles(OutputStream& out, size_t offset);
+  size_t WriteCodeDexFile(OutputStream& out, size_t offset, size_t& oat_class_index,
+                          const DexFile& dex_file);
+  size_t WriteCodeClassDef(OutputStream& out, size_t offset, size_t oat_class_index,
+                           const DexFile& dex_file, const DexFile::ClassDef& class_def);
+  size_t WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_class_index,
+                         size_t class_def_method_index, bool is_static, uint32_t method_idx,
+                         const DexFile& dex_file);
+
+  void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file,
+                          OutputStream& out) const;
+
+  class OatDexFile {
+   public:
+    explicit OatDexFile(size_t offset, const DexFile& dex_file);
+    size_t SizeOf() const;
+    void UpdateChecksum(OatHeader& oat_header) const;
+    bool Write(OatWriter* oat_writer, OutputStream& out) const;
+
+    // Offset of start of OatDexFile from beginning of OatHeader. It is
+    // used to validate file position when writing.
+    size_t offset_;
+
+    // data to write
+    uint32_t dex_file_location_size_;
+    const uint8_t* dex_file_location_data_;
+    uint32_t dex_file_location_checksum_;
+    uint32_t dex_file_offset_;
+    std::vector<uint32_t> methods_offsets_;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(OatDexFile);
+  };
+
+  class OatClass {
+   public:
+    explicit OatClass(size_t offset, mirror::Class::Status status, uint32_t methods_count);
+    size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
+    size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
+    size_t SizeOf() const;
+    void UpdateChecksum(OatHeader& oat_header) const;
+    bool Write(OatWriter* oat_writer, OutputStream& out) const;
+
+    // Offset of start of OatClass from beginning of OatHeader. It is
+    // used to validate file position when writing. For Portable, it
+    // is also used to calculate the position of the OatMethodOffsets
+    // so that code pointers within the OatMethodOffsets can be
+    // patched to point to code in the Portable .o ELF objects.
+    size_t offset_;
+
+    // data to write
+    mirror::Class::Status status_;
+    std::vector<OatMethodOffsets> method_offsets_;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(OatClass);
+  };
+
+  const CompilerDriver* const compiler_driver_;
+
+  // Note: OatWriter does not take ownership of the DexFiles.
+  const std::vector<const DexFile*>* dex_files_;
+
+  // dependencies on the image.
+  uint32_t image_file_location_oat_checksum_;
+  uint32_t image_file_location_oat_begin_;
+  std::string image_file_location_;
+
+  // data to write
+  OatHeader* oat_header_;
+  std::vector<OatDexFile*> oat_dex_files_;
+  std::vector<OatClass*> oat_classes_;
+  UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_entry_;
+  UniquePtr<const std::vector<uint8_t> > interpreter_to_quick_entry_;
+  UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
+  UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
+
+  // output stats
+  uint32_t size_dex_file_alignment_;
+  uint32_t size_executable_offset_alignment_;
+  uint32_t size_oat_header_;
+  uint32_t size_oat_header_image_file_location_;
+  uint32_t size_dex_file_;
+  uint32_t size_interpreter_to_interpreter_entry_;
+  uint32_t size_interpreter_to_quick_entry_;
+  uint32_t size_portable_resolution_trampoline_;
+  uint32_t size_quick_resolution_trampoline_;
+  uint32_t size_stubs_alignment_;
+  uint32_t size_code_size_;
+  uint32_t size_code_;
+  uint32_t size_code_alignment_;
+  uint32_t size_mapping_table_;
+  uint32_t size_vmap_table_;
+  uint32_t size_gc_map_;
+  uint32_t size_oat_dex_file_location_size_;
+  uint32_t size_oat_dex_file_location_data_;
+  uint32_t size_oat_dex_file_location_checksum_;
+  uint32_t size_oat_dex_file_offset_;
+  uint32_t size_oat_dex_file_methods_offsets_;
+  uint32_t size_oat_class_status_;
+  uint32_t size_oat_class_method_offsets_;
+
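+  // Orders map keys by the contents of the pointed-to vectors so that byte-identical code and
+  // tables share a single entry in the deduplication maps below.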
+  template <class T> struct MapCompare {
+   public:
+    bool operator() (const T* const &a, const T* const &b) const {
+      return *a < *b;
+    }
+  };
+
+  // code mappings for deduplication
+  SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > code_offsets_;
+  SafeMap<const std::vector<uint16_t>*, uint32_t, MapCompare<std::vector<uint16_t> > > vmap_table_offsets_;
+  SafeMap<const std::vector<uint32_t>*, uint32_t, MapCompare<std::vector<uint32_t> > > mapping_table_offsets_;
+  SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > gc_map_offsets_;
+
+  DISALLOW_COPY_AND_ASSIGN(OatWriter);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_WRITER_H_
diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc
new file mode 100644
index 0000000..9af7eff
--- /dev/null
+++ b/compiler/sea_ir/frontend.cc
@@ -0,0 +1,67 @@
+#ifdef ART_SEA_IR_MODE
+#include <llvm/Support/Threading.h>
+#include "base/logging.h"
+#include "dex/portable/mir_to_gbc.h"
+#include "driver/compiler_driver.h"
+#include "leb128.h"
+#include "llvm/llvm_compilation_unit.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "sea_ir/sea.h"
+
+namespace art {
+
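+// Builds the SEA IR graph for one method and dumps it to a .dot file; no SEA code generation
+// exists yet, so compilation deliberately aborts afterwards (see the CHECK below).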
+static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler,
+                                     const CompilerBackend compiler_backend,
+                                     const DexFile::CodeItem* code_item,
+                                     uint32_t access_flags, InvokeType invoke_type,
+                                     uint32_t class_def_idx, uint32_t method_idx,
+                                     jobject class_loader, const DexFile& dex_file
+#if defined(ART_USE_PORTABLE_COMPILER)
+                                     , llvm::LlvmCompilationUnit* llvm_compilation_unit
+#endif
+) {
+  // NOTE: Instead of keeping the convention from the Dalvik frontend.cc
+  //       and silencing the cpplint.py warning, I just corrected the formatting.
+  VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
+  sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph();
+  sg->CompileMethod(code_item, class_def_idx, method_idx, dex_file);
+  sg->DumpSea("/tmp/temp.dot");
+  CHECK(0 && "No SEA compiled function exists yet.");
+  return NULL;
+}
+
+
+CompiledMethod* SeaIrCompileOneMethod(CompilerDriver& compiler,
+                                 const CompilerBackend backend,
+                                 const DexFile::CodeItem* code_item,
+                                 uint32_t access_flags,
+                                 InvokeType invoke_type,
+                                 uint32_t class_def_idx,
+                                 uint32_t method_idx,
+                                 jobject class_loader,
+                                 const DexFile& dex_file,
+                                 llvm::LlvmCompilationUnit* llvm_compilation_unit) {
+  return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
+                       method_idx, class_loader, dex_file
+#if defined(ART_USE_PORTABLE_COMPILER)
+                       , llvm_compilation_unit
+#endif
+                       ); // NOLINT
+}
+
+extern "C" art::CompiledMethod*
+    SeaIrCompileMethod(art::CompilerDriver& compiler,
+                          const art::DexFile::CodeItem* code_item,
+                          uint32_t access_flags, art::InvokeType invoke_type,
+                          uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                          const art::DexFile& dex_file) {
+  // TODO: check method fingerprint here to determine appropriate backend type.  Until then, use build default
+  art::CompilerBackend backend = compiler.GetCompilerBackend();
+  return art::SeaIrCompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
+                               class_def_idx, method_idx, class_loader, dex_file,
+                               NULL /* use thread llvm_info */);
+}
+
+}  // namespace art
+
+#endif  // ART_SEA_IR_MODE
diff --git a/compiler/sea_ir/instruction_tools.cc b/compiler/sea_ir/instruction_tools.cc
new file mode 100644
index 0000000..5433591
--- /dev/null
+++ b/compiler/sea_ir/instruction_tools.cc
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_tools.h"
+
+namespace sea_ir {
+
+bool InstructionTools::IsDefinition(const art::Instruction* const instruction) {
+  if (0 != (InstructionTools::instruction_attributes_[instruction->Opcode()] & (1 << kDA))) {
+    return true;
+  }
+  return false;
+}
+
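+// Dataflow attributes for every Dex opcode, indexed by opcode value. Each entry is a bitmask of
+// DF_* flags describing the instruction's register definitions, uses, widths, and required checks.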
+const int InstructionTools::instruction_attributes_[] = {
+  // 00 NOP
+  DF_NOP,
+
+  // 01 MOVE vA, vB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 02 MOVE_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 03 MOVE_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 04 MOVE_WIDE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 06 MOVE_WIDE_16 vAAAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 07 MOVE_OBJECT vA, vB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 0A MOVE_RESULT vAA
+  DF_DA,
+
+  // 0B MOVE_RESULT_WIDE vAA
+  DF_DA | DF_A_WIDE,
+
+  // 0C MOVE_RESULT_OBJECT vAA
+  DF_DA | DF_REF_A,
+
+  // 0D MOVE_EXCEPTION vAA
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 0E RETURN_VOID
+  DF_NOP,
+
+  // 0F RETURN vAA
+  DF_UA,
+
+  // 10 RETURN_WIDE vAA
+  DF_UA | DF_A_WIDE,
+
+  // 11 RETURN_OBJECT vAA
+  DF_UA | DF_REF_A,
+
+  // 12 CONST_4 vA, #+B
+  DF_DA | DF_SETS_CONST,
+
+  // 13 CONST_16 vAA, #+BBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 14 CONST vAA, #+BBBBBBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 15 CONST_HIGH16 VAA, #+BBBB0000
+  DF_DA | DF_SETS_CONST,
+
+  // 16 CONST_WIDE_16 vAA, #+BBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 1A CONST_STRING vAA, string@BBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1C CONST_CLASS vAA, type@BBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1D MONITOR_ENTER vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1E MONITOR_EXIT vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1F CHK_CAST vAA, type@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 20 INSTANCE_OF vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
+
+  // 21 ARRAY_LENGTH vA, vB
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+
+  // 22 NEW_INSTANCE vAA, type@BBBB
+  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
+
+  // 23 NEW_ARRAY vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
+
+  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+
+  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+
+  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 27 THROW vAA
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 28 GOTO
+  DF_NOP,
+
+  // 29 GOTO_16
+  DF_NOP,
+
+  // 2A GOTO_32
+  DF_NOP,
+
+  // 2B PACKED_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2D CMPL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2E CMPG_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2F CMPL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 30 CMPG_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 31 CMP_LONG vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 32 IF_EQ vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 33 IF_NE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 34 IF_LT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 35 IF_GE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 36 IF_GT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 37 IF_LE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 38 IF_EQZ vAA, +BBBB
+  DF_UA,
+
+  // 39 IF_NEZ vAA, +BBBB
+  DF_UA,
+
+  // 3A IF_LTZ vAA, +BBBB
+  DF_UA,
+
+  // 3B IF_GEZ vAA, +BBBB
+  DF_UA,
+
+  // 3C IF_GTZ vAA, +BBBB
+  DF_UA,
+
+  // 3D IF_LEZ vAA, +BBBB
+  DF_UA,
+
+  // 3E UNUSED_3E
+  DF_NOP,
+
+  // 3F UNUSED_3F
+  DF_NOP,
+
+  // 40 UNUSED_40
+  DF_NOP,
+
+  // 41 UNUSED_41
+  DF_NOP,
+
+  // 42 UNUSED_42
+  DF_NOP,
+
+  // 43 UNUSED_43
+  DF_NOP,
+
+  // 44 AGET vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 45 AGET_WIDE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 46 AGET_OBJECT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 47 AGET_BOOLEAN vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 48 AGET_BYTE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 49 AGET_CHAR vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4A AGET_SHORT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4B APUT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4C APUT_WIDE vAA, vBB, vCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
+
+  // 4D APUT_OBJECT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 4E APUT_BOOLEAN vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4F APUT_BYTE vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 50 APUT_CHAR vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 51 APUT_SHORT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 52 IGET vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 53 IGET_WIDE vA, vB, field@CCCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 54 IGET_OBJECT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // 55 IGET_BOOLEAN vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 56 IGET_BYTE vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 57 IGET_CHAR vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 58 IGET_SHORT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 59 IPUT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5A IPUT_WIDE vA, vB, field@CCCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // 5B IPUT_OBJECT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5D IPUT_BYTE vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5E IPUT_CHAR vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5F IPUT_SHORT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 60 SGET vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 61 SGET_WIDE vAA, field@BBBB
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // 62 SGET_OBJECT vAA, field@BBBB
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // 63 SGET_BOOLEAN vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 64 SGET_BYTE vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 65 SGET_CHAR vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 66 SGET_SHORT vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 67 SPUT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 68 SPUT_WIDE vAA, field@BBBB
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // 69 SPUT_OBJECT vAA, field@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 6A SPUT_BOOLEAN vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6B SPUT_BYTE vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6C SPUT_CHAR vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6D SPUT_SHORT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_UMS,
+
+  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 73 UNUSED_73
+  DF_NOP,
+
+  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_UMS,
+
+  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 79 UNUSED_79
+  DF_NOP,
+
+  // 7A UNUSED_7A
+  DF_NOP,
+
+  // 7B NEG_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7C NOT_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7D NEG_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7E NOT_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7F NEG_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 80 NEG_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 81 INT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 82 INT_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 83 INT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 84 LONG_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 85 LONG_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 86 LONG_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 87 FLOAT_TO_INT vA, vB
+  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 88 FLOAT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 89 FLOAT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 8A DOUBLE_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8B DOUBLE_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8C DOUBLE_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 8D INT_TO_BYTE vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8E INT_TO_CHAR vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8F INT_TO_SHORT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 90 ADD_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 91 SUB_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 92 MUL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 93 DIV_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 94 REM_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 95 AND_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 96 OR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 97 XOR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 98 SHL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 99 SHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9A USHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9B ADD_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9C SUB_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9D MUL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9E DIV_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9F REM_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A0 AND_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A1 OR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A2 XOR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A3 SHL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A4 SHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A5 USHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A6 ADD_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A7 SUB_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A8 MUL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A9 DIV_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AA REM_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AB ADD_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AC SUB_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AD MUL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AE DIV_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AF REM_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // B0 ADD_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B1 SUB_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B2 MUL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B3 DIV_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B4 REM_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B5 AND_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B6 OR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B7 XOR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B8 SHL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B9 SHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BA USHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BB ADD_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BC SUB_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BD MUL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BE DIV_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BF REM_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C0 AND_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C1 OR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C2 XOR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C3 SHL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C4 SHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C5 USHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C6 ADD_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C7 SUB_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C8 MUL_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C9 DIV_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CA REM_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CB ADD_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CC SUB_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CD MUL_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CE DIV_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CF REM_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D1 RSUB_INT vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D4 REM_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D5 AND_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D6 OR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DA MUL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DB DIV_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DC REM_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DD AND_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DE OR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DF XOR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E3 IGET_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E4 IPUT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // E5 SGET_VOLATILE
+  DF_DA | DF_UMS,
+
+  // E6 SPUT_VOLATILE
+  DF_UA | DF_UMS,
+
+  // E7 IGET_OBJECT_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // E8 IGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E9 IPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // EA SGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // EB SPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // EC BREAKPOINT
+  DF_NOP,
+
+  // ED THROW_VERIFICATION_ERROR
+  DF_NOP | DF_UMS,
+
+  // EE EXECUTE_INLINE
+  DF_FORMAT_35C,
+
+  // EF EXECUTE_INLINE_RANGE
+  DF_FORMAT_3RC,
+
+  // F0 INVOKE_OBJECT_INIT_RANGE
+  DF_NOP | DF_NULL_CHK_0,
+
+  // F1 RETURN_VOID_BARRIER
+  DF_NOP,
+
+  // F2 IGET_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F3 IGET_WIDE_QUICK
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
+
+  // F4 IGET_OBJECT_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F5 IPUT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F6 IPUT_WIDE_QUICK
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
+
+  // F7 IPUT_OBJECT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F8 INVOKE_VIRTUAL_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // F9 INVOKE_VIRTUAL_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FA INVOKE_SUPER_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FB INVOKE_SUPER_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FC IPUT_OBJECT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // FD SGET_OBJECT_VOLATILE
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // FE SPUT_OBJECT_VOLATILE
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // FF UNUSED_FF
+  DF_NOP
+};
+} // end namespace sea_ir
diff --git a/compiler/sea_ir/instruction_tools.h b/compiler/sea_ir/instruction_tools.h
new file mode 100644
index 0000000..f68cdd0
--- /dev/null
+++ b/compiler/sea_ir/instruction_tools.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "dex_instruction.h"
+
+#ifndef INSTRUCTION_TOOLS_H_
+#define INSTRUCTION_TOOLS_H_
+
+// Note: The content of this file was cannibalized from the MIR implementation
+//       for SEA_IR, to avoid introducing a dependency on MIR.
+namespace sea_ir {
+
+#define DF_NOP                  0
+#define DF_UA                   (1 << kUA)
+#define DF_UB                   (1 << kUB)
+#define DF_UC                   (1 << kUC)
+#define DF_A_WIDE               (1 << kAWide)
+#define DF_B_WIDE               (1 << kBWide)
+#define DF_C_WIDE               (1 << kCWide)
+#define DF_DA                   (1 << kDA)
+#define DF_IS_MOVE              (1 << kIsMove)
+#define DF_SETS_CONST           (1 << kSetsConst)
+#define DF_FORMAT_35C           (1 << kFormat35c)
+#define DF_FORMAT_3RC           (1 << kFormat3rc)
+#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
+#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
+#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
+#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
+#define DF_NON_NULL_DST         (1 << kDstNonNull)
+#define DF_NON_NULL_RET         (1 << kRetNonNull)
+#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
+#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
+#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
+#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
+#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
+#define DF_FP_A                 (1 << kFPA)
+#define DF_FP_B                 (1 << kFPB)
+#define DF_FP_C                 (1 << kFPC)
+#define DF_CORE_A               (1 << kCoreA)
+#define DF_CORE_B               (1 << kCoreB)
+#define DF_CORE_C               (1 << kCoreC)
+#define DF_REF_A                (1 << kRefA)
+#define DF_REF_B                (1 << kRefB)
+#define DF_REF_C                (1 << kRefC)
+#define DF_UMS                  (1 << kUsesMethodStar)
+
+#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
+
+#define DF_HAS_DEFS             (DF_DA)
+
+#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
+                                 DF_NULL_CHK_1 | \
+                                 DF_NULL_CHK_2 | \
+                                 DF_NULL_CHK_OUT0)
+
+#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
+                                 DF_RANGE_CHK_2 | \
+                                 DF_RANGE_CHK_3)
+
+#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
+                                 DF_HAS_RANGE_CHKS)
+
+#define DF_A_IS_REG             (DF_UA | DF_DA)
+#define DF_B_IS_REG             (DF_UB)
+#define DF_C_IS_REG             (DF_UC)
+#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
+#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
+
+enum DataFlowAttributePos {
+  kUA = 0,
+  kUB,
+  kUC,
+  kAWide,
+  kBWide,
+  kCWide,
+  kDA,
+  kIsMove,
+  kSetsConst,
+  kFormat35c,
+  kFormat3rc,
+  kNullCheckSrc0,        // Null check of uses[0].
+  kNullCheckSrc1,        // Null check of uses[1].
+  kNullCheckSrc2,        // Null check of uses[2].
+  kNullCheckOut0,        // Null check of outgoing arg0.
+  kDstNonNull,           // May assume dst is non-null.
+  kRetNonNull,           // May assume retval is non-null.
+  kNullTransferSrc0,     // Object copy src[0] -> dst.
+  kNullTransferSrcN,     // Phi null check state transfer.
+  kRangeCheckSrc1,       // Range check of uses[1].
+  kRangeCheckSrc2,       // Range check of uses[2].
+  kRangeCheckSrc3,       // Range check of uses[3].
+  kFPA,
+  kFPB,
+  kFPC,
+  kCoreA,
+  kCoreB,
+  kCoreC,
+  kRefA,
+  kRefB,
+  kRefC,
+  kUsesMethodStar,       // Implicit use of Method*.
+};
+
+class InstructionTools {
+ public:
+  static bool IsDefinition(const art::Instruction* instruction);
+  static const int instruction_attributes_[];
+};
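+
+// Illustrative usage (a sketch, not part of the original interface): the
+// attribute table is meant to be queried with bitwise tests against the
+// DF_* masks above, for example:
+//   int attrs = InstructionTools::instruction_attributes_[inst->Opcode()];
+//   bool defines_register = (attrs & DF_DA) != 0;
+//   bool needs_null_check = (attrs & DF_HAS_NULL_CHKS) != 0;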
+} // end namespace sea_ir
+#endif // INSTRUCTION_TOOLS_H_
diff --git a/compiler/sea_ir/sea.cc b/compiler/sea_ir/sea.cc
new file mode 100644
index 0000000..c5ec2b9
--- /dev/null
+++ b/compiler/sea_ir/sea.cc
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sea.h"
+
+#include "file_output_stream.h"
+#include "instruction_tools.h"
+
+
+#define MAX_REACHING_DEF_ITERATIONS (10)
+
+namespace sea_ir {
+
+SeaGraph SeaGraph::graph_;
+int SeaNode::current_max_node_id_ = 0;
+
+
+SeaGraph* SeaGraph::GetCurrentGraph() {
+  return &sea_ir::SeaGraph::graph_;
+}
+
+void SeaGraph::DumpSea(std::string filename) const {
+  LOG(INFO) << "Starting to write SEA string to file.";
+  std::string result;
+  result += "digraph seaOfNodes {\n";
+  for (std::vector<Region*>::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) {
+    (*cit)->ToDot(result);
+  }
+  result += "}\n";
+  art::File* file = art::OS::OpenFile(filename.c_str(), true, true);
+  art::FileOutputStream fos(file);
+  fos.WriteFully(result.c_str(), result.size());
+  LOG(INFO) << "Written SEA string to file.";
+}
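+// The dumped file is plain Graphviz text; assuming the standard Graphviz "dot"
+// tool is available, a dump written to "sea.dot" can be rendered with, e.g.:
+//   dot -Tpng sea.dot -o sea.png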
+
+void SeaGraph::AddEdge(Region* src, Region* dst) const {
+  src->AddSuccessor(dst);
+  dst->AddPredecessor(src);
+}
+
+void SeaGraph::ComputeRPO(Region* current_region, int& current_rpo) {
+  current_region->SetRPO(VISITING);
+  std::vector<sea_ir::Region*>* succs = current_region->GetSuccessors();
+  for (std::vector<sea_ir::Region*>::iterator succ_it = succs->begin();
+      succ_it != succs->end(); ++succ_it) {
+    if (NOT_VISITED == (*succ_it)->GetRPO()) {
+      SeaGraph::ComputeRPO(*succ_it, current_rpo);
+    }
+  }
+  current_region->SetRPO(current_rpo--);
+}
+
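+// The iterative computation below (together with Intersect()) follows the
+// approach of Cooper, Harvey & Kennedy's "A Simple, Fast Dominance Algorithm":
+// each region's idom is recomputed from its already-processed predecessors
+// until a fixed point is reached.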
+void SeaGraph::ComputeIDominators() {
+  bool changed = true;
+  while (changed) {
+    changed = false;
+    // Entry node has itself as IDOM.
+    std::vector<Region*>::iterator crt_it;
+    std::set<Region*> processedNodes;
+    // Find and mark the entry node(s).
+    for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
+      if ((*crt_it)->GetPredecessors()->size() == 0) {
+        processedNodes.insert(*crt_it);
+        (*crt_it)->SetIDominator(*crt_it);
+      }
+    }
+    for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
+      if ((*crt_it)->GetPredecessors()->size() == 0) {
+        continue;
+      }
+      // NewIDom = first (processed) predecessor of b.
+      Region* new_dom = NULL;
+      std::vector<Region*>* preds = (*crt_it)->GetPredecessors();
+      DCHECK(NULL != preds);
+      Region* root_pred = NULL;
+      for (std::vector<Region*>::iterator pred_it = preds->begin();
+          pred_it != preds->end(); ++pred_it) {
+        if (processedNodes.end() != processedNodes.find((*pred_it))) {
+          root_pred = *pred_it;
+          new_dom = root_pred;
+          break;
+        }
+      }
+      // For all other predecessors p of b, if the idom of p is already set,
+      // then NewIdom = Intersect(p, NewIdom).
+      for (std::vector<Region*>::const_iterator pred_it = preds->begin();
+          pred_it != preds->end(); ++pred_it) {
+        DCHECK(NULL != *pred_it);
+        // if IDOMS[p] != UNDEFINED
+        if ((*pred_it != root_pred) && (*pred_it)->GetIDominator() != NULL) {
+          DCHECK(NULL != new_dom);
+          new_dom = SeaGraph::Intersect(*pred_it, new_dom);
+        }
+      }
+      DCHECK(NULL != *crt_it);
+      if ((*crt_it)->GetIDominator() != new_dom) {
+        (*crt_it)->SetIDominator(new_dom);
+        changed = true;
+      }
+      processedNodes.insert(*crt_it);
+    }
+  }
+
+  // For easy ordering of regions we need dominator->dominated edges.
+  for (std::vector<Region*>::iterator region_it = regions_.begin();
+      region_it != regions_.end(); region_it++) {
+    Region* idom = (*region_it)->GetIDominator();
+    if (idom != *region_it) {
+      idom->AddToIDominatedSet(*region_it);
+    }
+  }
+}
+
+Region* SeaGraph::Intersect(Region* i, Region* j) {
+  Region* finger1 = i;
+  Region* finger2 = j;
+  while (finger1 != finger2) {
+    while (finger1->GetRPO() > finger2->GetRPO()) {
+      DCHECK(NULL != finger1);
+      finger1 = finger1->GetIDominator(); // should have: finger1 != NULL
+      DCHECK(NULL != finger1);
+    }
+    while (finger1->GetRPO() < finger2->GetRPO()) {
+      DCHECK(NULL != finger2);
+      finger2 = finger2->GetIDominator(); // should have: finger2 != NULL
+      DCHECK(NULL != finger2);
+    }
+  }
+  return finger1; // finger1 should be equal to finger2 at this point.
+}
+
+void SeaGraph::ComputeDownExposedDefs() {
+  for (std::vector<Region*>::iterator region_it = regions_.begin();
+        region_it != regions_.end(); region_it++) {
+      (*region_it)->ComputeDownExposedDefs();
+    }
+}
+
+void SeaGraph::ComputeReachingDefs() {
+  // Iterate until the reaching definitions set doesn't change anymore.
+  // (See Cooper & Torczon, "Engineering a Compiler", second edition, page 487)
+  bool changed = true;
+  int iteration = 0;
+  while (changed && (iteration < MAX_REACHING_DEF_ITERATIONS)) {
+    iteration++;
+    changed = false;
+    // TODO: Optimize the ordering if this becomes a performance bottleneck.
+    for (std::vector<Region*>::iterator regions_it = regions_.begin();
+        regions_it != regions_.end();
+        regions_it++) {
+      changed |= (*regions_it)->UpdateReachingDefs();
+    }
+  }
+  DCHECK(!changed) << "Reaching definitions computation did not reach a fixed point.";
+}
+
+
+void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
+    const art::DexFile& dex_file) {
+  const uint16_t* code = code_item->insns_;
+  const size_t size_in_code_units = code_item->insns_size_in_code_units_;
+  // This maps target instruction pointers to their corresponding region objects.
+  std::map<const uint16_t*, Region*> target_regions;
+  size_t i = 0;
+  // Pass: Find the start instruction of basic blocks
+  //         by locating targets and flow-through instructions of branches.
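+  // For example (hypothetical bytecode, not taken from a real method):
+  //   0000: if-eqz v0, +0004    // target 0004 and fall-through 0002 both
+  //   0002: const/16 v1, #1     //   start new regions
+  //   0004: return v1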
+  while (i < size_in_code_units) {
+    const art::Instruction* inst = art::Instruction::At(&code[i]);
+    if (inst->IsBranch() || inst->IsUnconditional()) {
+      int32_t offset = inst->GetTargetOffset();
+      if (target_regions.end() == target_regions.find(&code[i + offset])) {
+        Region* region = GetNewRegion();
+        target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i + offset], region));
+      }
+      if (inst->CanFlowThrough()
+          && (target_regions.end() == target_regions.find(&code[i + inst->SizeInCodeUnits()]))) {
+        Region* region = GetNewRegion();
+        target_regions.insert(
+            std::pair<const uint16_t*, Region*>(&code[i + inst->SizeInCodeUnits()], region));
+      }
+    }
+    i += inst->SizeInCodeUnits();
+  }
+  // Pass: Assign instructions to region nodes and
+  //         assign branches their control flow successors.
+  i = 0;
+  Region* r = GetNewRegion();
+  SignatureNode* parameter_def_node = new sea_ir::SignatureNode(code_item->registers_size_-1,
+        code_item->ins_size_);
+  r->AddChild(parameter_def_node);
+  sea_ir::InstructionNode* last_node = NULL;
+  sea_ir::InstructionNode* node = NULL;
+  while (i < size_in_code_units) {
+    const art::Instruction* inst = art::Instruction::At(&code[i]);  // TODO: Find a workaround for this.
+    last_node = node;
+    node = new sea_ir::InstructionNode(inst);
+
+    if (inst->IsBranch() || inst->IsUnconditional()) {
+      int32_t offset = inst->GetTargetOffset();
+      std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i + offset]);
+      DCHECK(it != target_regions.end());
+      AddEdge(r, it->second); // Add edge to branch target.
+    }
+
+    std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i]);
+    if (target_regions.end() != it) {
+      // Get the already created region because this is a branch target.
+      Region* nextRegion = it->second;
+      if (last_node != NULL && last_node->GetInstruction()->IsBranch()
+          && last_node->GetInstruction()->CanFlowThrough()) {
+        AddEdge(r, it->second); // Add flow-through edge.
+      }
+      r = nextRegion;
+    }
+    bool definesRegister = (0 != (InstructionTools::instruction_attributes_[inst->Opcode()]
+        & (1 << kDA)));
+    LOG(INFO) << inst->GetDexPc(code) << " *** " << inst->DumpString(&dex_file)
+        << " region:" << r->StringId() << " Definition? " << definesRegister << std::endl;
+    r->AddChild(node);
+    i += inst->SizeInCodeUnits();
+  }
+}
+
+void SeaGraph::ComputeRPO() {
+  int rpo_id = regions_.size() - 1;
+  for (std::vector<Region*>::const_iterator crt_it = regions_.begin(); crt_it != regions_.end();
+      ++crt_it) {
+    if ((*crt_it)->GetPredecessors()->size() == 0) {
+      ComputeRPO(*crt_it, rpo_id);
+    }
+  }
+}
+
+// Performs the renaming phase in traditional SSA transformations.
+// (See Cooper & Torczon, "Engineering a Compiler", second edition, page 505.)
+void SeaGraph::RenameAsSSA() {
+  utils::ScopedHashtable<int, InstructionNode*> scoped_table;
+  scoped_table.OpenScope();
+  for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
+      region_it++) {
+    if ((*region_it)->GetIDominator() == *region_it) {
+      RenameAsSSA(*region_it, &scoped_table);
+    }
+  }
+
+  scoped_table.CloseScope();
+}
+
+void SeaGraph::ConvertToSSA() {
+  // Pass: find global names.
+  // The map @blocks maps registers to the blocks in which they are defined.
+  std::map<int, std::set<Region*> > blocks;
+  // The set @globals records registers whose use
+  // is in a different block than the corresponding definition.
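+  // For example (hypothetical): a register defined in one region and used in
+  // another becomes a global and is a candidate for phi-node insertion below.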
+  std::set<int> globals;
+  for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
+      region_it++) {
+    std::set<int> var_kill;
+    std::vector<InstructionNode*>* instructions = (*region_it)->GetInstructions();
+    for (std::vector<InstructionNode*>::iterator inst_it = instructions->begin();
+        inst_it != instructions->end(); inst_it++) {
+      std::vector<int> used_regs = (*inst_it)->GetUses();
+      for (std::size_t i = 0; i < used_regs.size(); i++) {
+        int used_reg = used_regs[i];
+        if (var_kill.find(used_reg) == var_kill.end()) {
+          globals.insert(used_reg);
+        }
+      }
+      const int reg_def = (*inst_it)->GetResultRegister();
+      if (reg_def != NO_REGISTER) {
+        var_kill.insert(reg_def);
+      }
+
+      blocks.insert(std::pair<int, std::set<Region*> >(reg_def, std::set<Region*>()));
+      std::set<Region*>* reg_def_blocks = &(blocks.find(reg_def)->second);
+      reg_def_blocks->insert(*region_it);
+    }
+  }
+
+  // Pass: Actually add phi-nodes to regions.
+  for (std::set<int>::const_iterator globals_it = globals.begin();
+      globals_it != globals.end(); globals_it++) {
+    int global = *globals_it;
+    // Copy the set, because we will modify the worklist as we go.
+    std::set<Region*> worklist((*(blocks.find(global))).second);
+    for (std::set<Region*>::const_iterator b_it = worklist.begin(); b_it != worklist.end(); b_it++) {
+      std::set<Region*>* df = (*b_it)->GetDominanceFrontier();
+      for (std::set<Region*>::const_iterator df_it = df->begin(); df_it != df->end(); df_it++) {
+        if ((*df_it)->InsertPhiFor(global)) {
+          // Only extend the worklist (and restart its traversal) if the
+          // dominance frontier element is not in the worklist already.
+          if (worklist.find(*df_it) == worklist.end()) {
+            worklist.insert(*df_it);
+            b_it = worklist.begin();
+            break;
+          }
+        }
+      }
+    }
+  }
+  // Pass: Build edges to the definition corresponding to each use.
+  // (This corresponds to the renaming phase in traditional SSA transformations.
+  // See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.)
+  RenameAsSSA();
+}
+
+void SeaGraph::RenameAsSSA(Region* crt_region,
+    utils::ScopedHashtable<int, InstructionNode*>* scoped_table) {
+  scoped_table->OpenScope();
+  // Rename phi nodes defined in the current region.
+  std::vector<PhiInstructionNode*>* phis = crt_region->GetPhiNodes();
+  for (std::vector<PhiInstructionNode*>::iterator phi_it = phis->begin();
+      phi_it != phis->end(); phi_it++) {
+    int reg_no = (*phi_it)->GetRegisterNumber();
+    scoped_table->Add(reg_no, (*phi_it));
+  }
+  // Rename operands of instructions from the current region.
+  std::vector<InstructionNode*>* instructions = crt_region->GetInstructions();
+  for (std::vector<InstructionNode*>::const_iterator instructions_it = instructions->begin();
+      instructions_it != instructions->end(); instructions_it++) {
+    InstructionNode* current_instruction = (*instructions_it);
+    // Rename uses.
+    std::vector<int> used_regs = current_instruction->GetUses();
+    for (std::vector<int>::const_iterator reg_it = used_regs.begin();
+        reg_it != used_regs.end(); reg_it++) {
+      int current_used_reg = (*reg_it);
+      InstructionNode* definition = scoped_table->Lookup(current_used_reg);
+      current_instruction->RenameToSSA(current_used_reg, definition);
+    }
+    // Update scope table with latest definitions.
+    std::vector<int> def_regs = current_instruction->GetDefinitions();
+    for (std::vector<int>::const_iterator reg_it = def_regs.begin();
+            reg_it != def_regs.end(); reg_it++) {
+      int current_defined_reg = (*reg_it);
+      scoped_table->Add(current_defined_reg, current_instruction);
+    }
+  }
+  // Fill in uses of phi functions in CFG successor regions.
+  const std::vector<Region*>* successors = crt_region->GetSuccessors();
+  for (std::vector<Region*>::const_iterator successors_it = successors->begin();
+      successors_it != successors->end(); successors_it++) {
+    Region* successor = (*successors_it);
+    successor->SetPhiDefinitionsForUses(scoped_table, crt_region);
+  }
+
+  // Rename all successors in the dominators tree.
+  const std::set<Region*>* dominated_nodes = crt_region->GetIDominatedSet();
+  for (std::set<Region*>::const_iterator dominated_nodes_it = dominated_nodes->begin();
+      dominated_nodes_it != dominated_nodes->end(); dominated_nodes_it++) {
+    Region* dominated_node = (*dominated_nodes_it);
+    RenameAsSSA(dominated_node, scoped_table);
+  }
+  scoped_table->CloseScope();
+}
+
+void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item,
+  uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file) {
+  // Two passes: Build the intermediate (non-SSA) structure of the sea-ir for the method.
+  BuildMethodSeaGraph(code_item, dex_file);
+  // Pass: Compute reverse post-order of regions.
+  ComputeRPO();
+  // Multiple passes: compute immediate dominators.
+  ComputeIDominators();
+  // Pass: compute downward-exposed definitions.
+  ComputeDownExposedDefs();
+  // Multiple passes (iterative fixed-point algorithm): Compute reaching definitions.
+  ComputeReachingDefs();
+  // Pass (O(nlogN)): Compute the dominance frontier for region nodes.
+  ComputeDominanceFrontier();
+  // Two Passes: Phi node insertion.
+  ConvertToSSA();
+}
+
+
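+// A region's dominance frontier collects the join regions where its dominance
+// ends. The "runner" walk below starts at each predecessor of a region with
+// multiple predecessors and climbs the dominator tree until it reaches that
+// region's immediate dominator (see the reference given in sea.h).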
+void SeaGraph::ComputeDominanceFrontier() {
+  for (std::vector<Region*>::iterator region_it = regions_.begin();
+      region_it != regions_.end(); region_it++) {
+    std::vector<Region*>* preds = (*region_it)->GetPredecessors();
+    if (preds->size() > 1) {
+      for (std::vector<Region*>::iterator pred_it = preds->begin();
+          pred_it != preds->end(); pred_it++) {
+        Region* runner = *pred_it;
+        while (runner != (*region_it)->GetIDominator()) {
+          runner->AddToDominanceFrontier(*region_it);
+          runner = runner->GetIDominator();
+        }
+      }
+    }
+  }
+}
+
+Region* SeaGraph::GetNewRegion() {
+  Region* new_region = new Region();
+  AddRegion(new_region);
+  return new_region;
+}
+
+void SeaGraph::AddRegion(Region* r) {
+  DCHECK(r) << "Tried to add NULL region to SEA graph.";
+  regions_.push_back(r);
+}
+
+void SeaNode::AddSuccessor(Region* successor) {
+  DCHECK(successor) << "Tried to add NULL successor to SEA node.";
+  successors_.push_back(successor);
+  return;
+}
+
+void SeaNode::AddPredecessor(Region* predecessor) {
+  DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
+  predecessors_.push_back(predecessor);
+}
+
+void Region::AddChild(sea_ir::InstructionNode* instruction) {
+  DCHECK(instruction) << "Tried to add NULL instruction to region node.";
+  instructions_.push_back(instruction);
+}
+
+SeaNode* Region::GetLastChild() const {
+  if (instructions_.size() > 0) {
+    return instructions_.back();
+  }
+  return NULL;
+}
+
+void Region::ToDot(std::string& result) const {
+  result += "\n// Region: \n" + StringId() + " [label=\"region " + StringId() + "(rpo=";
+  std::stringstream ss;
+  ss << rpo_;
+  result.append(ss.str());
+  if (NULL != GetIDominator()) {
+    result += " dom=" + GetIDominator()->StringId();
+  }
+  result += ")\"];\n";
+
+  // Save phi-nodes.
+  for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions_.begin();
+      cit != phi_instructions_.end(); cit++) {
+    (*cit)->ToDot(result);
+    result += StringId() + " -> " + (*cit)->StringId() + "; // phi-function \n";
+  }
+
+  // Save instruction nodes.
+  for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
+      cit != instructions_.end(); cit++) {
+    (*cit)->ToDot(result);
+    result += StringId() + " -> " + (*cit)->StringId() + "; // region -> instruction \n";
+  }
+
+  for (std::vector<Region*>::const_iterator cit = successors_.begin(); cit != successors_.end();
+      cit++) {
+    DCHECK(NULL != *cit) << "Null successor found for SeaNode " << GetLastChild()->StringId() << ".";
+    result += GetLastChild()->StringId() + " -> " + (*cit)->StringId() + ";\n\n";
+  }
+  // Save reaching definitions.
+  for (std::map<int, std::set<sea_ir::InstructionNode*>* >::const_iterator cit =
+      reaching_defs_.begin();
+      cit != reaching_defs_.end(); cit++) {
+    for (std::set<sea_ir::InstructionNode*>::const_iterator
+        reaching_set_it = (*cit).second->begin();
+        reaching_set_it != (*cit).second->end();
+        reaching_set_it++) {
+      result += (*reaching_set_it)->StringId() +
+         " -> " + StringId() +
+         " [style=dotted]; // Reaching def.\n";
+    }
+  }
+  // Save dominance frontier.
+  for (std::set<Region*>::const_iterator cit = df_.begin(); cit != df_.end(); cit++) {
+    result += StringId() +
+        " -> " + (*cit)->StringId() +
+        " [color=gray]; // Dominance frontier.\n";
+  }
+  result += "// End Region.\n";
+}
+
+void Region::ComputeDownExposedDefs() {
+  for (std::vector<InstructionNode*>::const_iterator inst_it = instructions_.begin();
+      inst_it != instructions_.end(); inst_it++) {
+    int reg_no = (*inst_it)->GetResultRegister();
+    if (reg_no == NO_REGISTER) {
+      continue;  // Instructions with no result register cannot be down-exposed definitions.
+    }
+    std::map<int, InstructionNode*>::iterator res = de_defs_.find(reg_no);
+    if (res == de_defs_.end()) {
+      de_defs_.insert(std::pair<int, InstructionNode*>(reg_no, *inst_it));
+    } else {
+      res->second = *inst_it;
+    }
+  }
+  for (std::map<int, sea_ir::InstructionNode*>::const_iterator cit = de_defs_.begin();
+      cit != de_defs_.end(); cit++) {
+    (*cit).second->MarkAsDEDef();
+  }
+}
+
+const std::map<int, sea_ir::InstructionNode*>* Region::GetDownExposedDefs() const {
+  return &de_defs_;
+}
+
+std::map<int, std::set<sea_ir::InstructionNode*>* >* Region::GetReachingDefs() {
+  return &reaching_defs_;
+}
+
+bool Region::UpdateReachingDefs() {
+  std::map<int, std::set<sea_ir::InstructionNode*>* > new_reaching;
+  for (std::vector<Region*>::const_iterator pred_it = predecessors_.begin();
+      pred_it != predecessors_.end(); pred_it++) {
+    // The reaching_defs variable will contain the reaching defs for the current predecessor only.
+    std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs;
+    std::map<int, std::set<sea_ir::InstructionNode*>* >* pred_reaching = (*pred_it)->GetReachingDefs();
+    const std::map<int, InstructionNode*>* de_defs = (*pred_it)->GetDownExposedDefs();
+
+    // The definitions from the reaching set of the predecessor
+    // may be shadowed by downward exposed definitions from the predecessor,
+    // otherwise the defs from the reaching set are still good.
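+    // In equation form (a sketch of the textbook formulation):
+    //   Reaches(b) = union over predecessors p of (DEDef(p) combined with Reaches(p)),
+    // where a register's entry from DEDef(p) shadows the one from Reaches(p)
+    // because it is inserted first (std::map::insert keeps existing keys).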
+    for (std::map<int, InstructionNode*>::const_iterator de_def = de_defs->begin();
+        de_def != de_defs->end(); de_def++) {
+      std::set<InstructionNode*>* solo_def;
+      solo_def = new std::set<InstructionNode*>();
+      solo_def->insert(de_def->second);
+      reaching_defs.insert(
+          std::pair<int const, std::set<InstructionNode*>*>(de_def->first, solo_def));
+    }
+    reaching_defs.insert(pred_reaching->begin(), pred_reaching->end());
+
+    // Now we combine the reaching map coming from the current predecessor (reaching_defs)
+    // with the accumulated set from all predecessors so far (from new_reaching).
+    std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = reaching_defs.begin();
+    for (; reaching_it != reaching_defs.end(); reaching_it++) {
+      std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator crt_entry =
+          new_reaching.find(reaching_it->first);
+      if (new_reaching.end() != crt_entry) {
+        crt_entry->second->insert(reaching_it->second->begin(), reaching_it->second->end());
+      } else {
+        new_reaching.insert(
+            std::pair<int, std::set<sea_ir::InstructionNode*>*>(
+                reaching_it->first,
+                reaching_it->second) );
+      }
+    }
+  }
+  bool changed = false;
+  // Because the sets are monotonically increasing,
+  // we can compare sizes instead of using set comparison.
+  // TODO: Find formal proof.
+  int old_size = 0;
+  if (-1 == reaching_defs_size_) {
+    std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = reaching_defs_.begin();
+    for (; reaching_it != reaching_defs_.end(); reaching_it++) {
+      old_size += (*reaching_it).second->size();
+    }
+  } else {
+    old_size = reaching_defs_size_;
+  }
+  int new_size = 0;
+  std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = new_reaching.begin();
+  for (; reaching_it != new_reaching.end(); reaching_it++) {
+    new_size += (*reaching_it).second->size();
+  }
+  if (old_size != new_size) {
+    changed = true;
+  }
+  if (changed) {
+    reaching_defs_ = new_reaching;
+    reaching_defs_size_ = new_size;
+  }
+  return changed;
+}
+
+bool Region::InsertPhiFor(int reg_no) {
+  if (!ContainsPhiFor(reg_no)) {
+    phi_set_.insert(reg_no);
+    PhiInstructionNode* new_phi = new PhiInstructionNode(reg_no);
+    phi_instructions_.push_back(new_phi);
+    return true;
+  }
+  return false;
+}
+
+void Region::SetPhiDefinitionsForUses(
+    const utils::ScopedHashtable<int, InstructionNode*>* scoped_table, Region* predecessor) {
+  int predecessor_id = -1;
+  for (unsigned int crt_pred_id = 0; crt_pred_id < predecessors_.size(); crt_pred_id++) {
+    if (predecessors_.at(crt_pred_id) == predecessor) {
+      predecessor_id = crt_pred_id;
+    }
+  }
+  DCHECK_NE(-1, predecessor_id);
+  for (std::vector<PhiInstructionNode*>::iterator phi_it = phi_instructions_.begin();
+      phi_it != phi_instructions_.end(); phi_it++) {
+    PhiInstructionNode* phi = (*phi_it);
+    int reg_no = phi->GetRegisterNumber();
+    InstructionNode* definition = scoped_table->Lookup(reg_no);
+    phi->RenameToSSA(reg_no, definition, predecessor_id);
+  }
+}
+
+void InstructionNode::ToDot(std::string& result) const {
+  result += "// Instruction: \n" + StringId() +
+      " [label=\"" + instruction_->DumpString(NULL) + "\"";
+  if (de_def_) {
+    result += "style=bold";
+  }
+  result += "];\n";
+  // SSA definitions:
+  for (std::map<int, InstructionNode* >::const_iterator def_it = definition_edges_.begin();
+      def_it != definition_edges_.end(); def_it++) {
+    if (NULL != def_it->second) {
+      result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\"";
+      std::stringstream ss;
+      ss << def_it->first;
+      result.append(ss.str());
+      result += "\"] ; // ssa edge\n";
+    }
+  }
+}
+
+void InstructionNode::MarkAsDEDef() {
+  de_def_ = true;
+}
+
+int InstructionNode::GetResultRegister() const {
+  if (instruction_->HasVRegA()) {
+    return instruction_->VRegA();
+  }
+  return NO_REGISTER;
+}
+
+std::vector<int> InstructionNode::GetDefinitions() const {
+  // TODO: Extend this to handle instructions defining more than one register (if any).
+  // The return value should then be changed to a pointer to a member field; for now it is
+  // returned by value so that we avoid possible memory leaks from dynamic allocation.
+  std::vector<int> definitions;
+  int result = GetResultRegister();
+  if (NO_REGISTER != result) {
+    definitions.push_back(result);
+  }
+  return definitions;
+}
+
+std::vector<int> InstructionNode::GetUses() {
+  std::vector<int> uses; // Using vector<> instead of set<> because order matters.
+
+  if (!InstructionTools::IsDefinition(instruction_) && (instruction_->HasVRegA())) {
+    int vA = instruction_->VRegA();
+    uses.push_back(vA);
+  }
+  if (instruction_->HasVRegB()) {
+    int vB = instruction_->VRegB();
+    uses.push_back(vB);
+  }
+  if (instruction_->HasVRegC()) {
+    int vC = instruction_->VRegC();
+    uses.push_back(vC);
+  }
+  // TODO: Add support for function argument registers.
+  return uses;
+}
+
+void PhiInstructionNode::ToDot(std::string& result) const {
+  result += "// PhiInstruction: \n" + StringId() +
+      " [label=\"" + "PHI(";
+  std::stringstream phi_reg_stream;
+  phi_reg_stream << register_no_;
+  result.append(phi_reg_stream.str());
+  result += ")\"";
+  result += "];\n";
+
+  for (std::vector<std::map<int, InstructionNode*>*>::const_iterator pred_it = definition_edges_.begin();
+      pred_it != definition_edges_.end(); pred_it++) {
+    std::map<int, InstructionNode*>* defs_from_pred = *pred_it;
+    for (std::map<int, InstructionNode* >::const_iterator def_it = defs_from_pred->begin();
+        def_it != defs_from_pred->end(); def_it++) {
+      if (NULL != def_it->second) {
+        result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\"vR = ";
+        std::stringstream ss;
+        ss << def_it->first;
+        result.append(ss.str());
+        result += "\"] ; // phi-ssa edge\n";
+      } else {
+        result += StringId() + " -> " + StringId() +"[color=blue,label=\"vR = ";
+        std::stringstream ss;
+        ss << def_it->first;
+        result.append(ss.str());
+        result += "\"] ; // empty phi-ssa edge\n";
+      }
+    }
+  }
+}
+} // end namespace sea_ir
diff --git a/compiler/sea_ir/sea.h b/compiler/sea_ir/sea.h
new file mode 100644
index 0000000..a133678
--- /dev/null
+++ b/compiler/sea_ir/sea.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef SEA_IR_H_
+#define SEA_IR_H_
+
+#include <set>
+#include <map>
+
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "sea_ir/instruction_tools.h"
+#include "utils/scoped_hashtable.h"
+
+
+namespace sea_ir {
+
+#define NO_REGISTER             (-1)
+
+// Reverse post-order numbering constants
+enum RegionNumbering {
+  NOT_VISITED = -1,
+  VISITING = -2
+};
+
+class Region;
+class InstructionNode;
+class PhiInstructionNode;
+
+class SeaNode {
+ public:
+  explicit SeaNode():id_(GetNewId()), string_id_(), successors_(), predecessors_() {
+    std::stringstream ss;
+    ss << id_;
+    string_id_.append(ss.str());
+  }
+  // Adds CFG predecessors and successors to each block.
+  void AddSuccessor(Region* successor);
+  void AddPredecessor(Region* predecessor);
+
+  std::vector<sea_ir::Region*>* GetSuccessors() {
+    return &successors_;
+  }
+  std::vector<sea_ir::Region*>* GetPredecessors() {
+    return &predecessors_;
+  }
+  // Returns the id of the current block as a string.
+  const std::string& StringId() const {
+    return string_id_;
+  }
+  // Appends to @result a dot language formatted string representing the node and
+  //    (by convention) outgoing edges, so that the composition of the ToDot() of all nodes
+  //    builds a complete dot graph, but without prolog ("digraph {") and epilog ("}").
+  virtual void ToDot(std::string& result) const = 0;
+
+  virtual ~SeaNode() {}
+
+ protected:
+  static int GetNewId() {
+    return current_max_node_id_++;
+  }
+
+  const int id_;
+  std::string string_id_;
+  std::vector<sea_ir::Region*> successors_;    // CFG successor nodes (regions)
+  std::vector<sea_ir::Region*> predecessors_;  // CFG predecessor nodes (regions)
+
+ private:
+  static int current_max_node_id_;
+};
+
+class InstructionNode: public SeaNode {
+ public:
+  explicit InstructionNode(const art::Instruction* in):
+    SeaNode(), instruction_(in), de_def_(false) {}
+  // Returns the Dalvik instruction around which this InstructionNode is wrapped.
+  const art::Instruction* GetInstruction() const {
+    DCHECK(NULL != instruction_) << "Tried to access NULL instruction in an InstructionNode.";
+    return instruction_;
+  }
+  // Returns the register that is defined by the current instruction, or NO_REGISTER otherwise.
+  virtual int GetResultRegister() const;
+  // Returns the set of registers defined by the current instruction.
+  virtual std::vector<int> GetDefinitions() const;
+  // Returns the set of register numbers that are used by the instruction.
+  virtual std::vector<int> GetUses();
+  // Appends to @result the .dot string representation of the instruction.
+  void ToDot(std::string& result) const;
+  // Marks the current instruction as a downward exposed definition.
+  void MarkAsDEDef();
+  // Rename the use of @reg_no to refer to the instruction @definition,
+  // essentially creating SSA form.
+  void RenameToSSA(int reg_no, InstructionNode* definition) {
+    definition_edges_.insert(std::pair<int, InstructionNode*>(reg_no, definition));
+  }
+
+ private:
+  const art::Instruction* const instruction_;
+  std::map<int, InstructionNode* > definition_edges_;
+  bool de_def_;
+};
+
+class SignatureNode: public InstructionNode {
+ public:
+  explicit SignatureNode(unsigned int start_register, unsigned int count):
+      InstructionNode(NULL), defined_regs_() {
+    for (unsigned int crt_offset = 0; crt_offset < count; crt_offset++) {
+      defined_regs_.push_back(start_register - crt_offset);
+    }
+  }
+
+  void ToDot(std::string& result) const {
+    result += StringId() +"[label=\"signature:";
+    std::stringstream vector_printer;
+    if (!defined_regs_.empty()) {
+      for (unsigned int crt_el = 0; crt_el < defined_regs_.size()-1; crt_el++) {
+        vector_printer << defined_regs_[crt_el] <<",";
+      }
+      vector_printer << defined_regs_[defined_regs_.size()-1] <<";";
+    }
+    result += vector_printer.str();
+    result += "\"] // signature node\n";
+  }
+
+  std::vector<int> GetDefinitions() const {
+    return defined_regs_;
+  }
+
+  int GetResultRegister() const {
+    return NO_REGISTER;
+  }
+
+  std::vector<int> GetUses() {
+    return std::vector<int>();
+  }
+
+ private:
+  std::vector<int> defined_regs_;
+};
+
+
+class PhiInstructionNode: public InstructionNode {
+ public:
+  explicit PhiInstructionNode(int register_no):
+    InstructionNode(NULL), register_no_(register_no), definition_edges_() {}
+  // Appends to @result the .dot string representation of the instruction.
+  void ToDot(std::string& result) const;
+  // Returns the register on which this phi-function is used.
+  int GetRegisterNumber() {
+    return register_no_;
+  }
+
+  // Rename the use of @reg_no to refer to the instruction @definition.
+  // Phi-functions differ from normal instructions in that they have multiple
+  // predecessor regions; this is why RenameToSSA takes the additional
+  // @predecessor_id parameter, specifying the incoming edge for @definition,
+  // essentially creating SSA form.
+  void RenameToSSA(int reg_no, InstructionNode* definition, unsigned int predecessor_id) {
+    DCHECK(NULL != definition) << "Tried to rename to SSA using a NULL definition for "
+        << StringId() << " register " << reg_no;
+    if (definition_edges_.size() < predecessor_id+1) {
+      definition_edges_.resize(predecessor_id+1, NULL);
+    }
+
+    if (NULL == definition_edges_.at(predecessor_id)) {
+      definition_edges_[predecessor_id] = new std::map<int, InstructionNode*>();
+    }
+    definition_edges_[predecessor_id]->insert(std::pair<int, InstructionNode*>(reg_no, definition));
+  }
+
+ private:
+  int register_no_;
+  std::vector<std::map<int, InstructionNode*>*> definition_edges_;
+};
+
+class Region : public SeaNode {
+ public:
+  explicit Region():
+    SeaNode(), reaching_defs_size_(0), rpo_(NOT_VISITED), idom_(NULL),
+    idominated_set_(), df_(), phi_set_() {}
+
+  // Adds @instruction as an instruction node child in the current region.
+  void AddChild(sea_ir::InstructionNode* instruction);
+
+  // Returns the last instruction node child of the current region.
+  // This child has the CFG successors pointing to the new regions.
+  SeaNode* GetLastChild() const;
+  // Returns all the child instructions of this region, in program order.
+  std::vector<InstructionNode*>* GetInstructions() {
+    return &instructions_;
+  }
+  // Appends to @result a dot language formatted string representing the node and
+  //    (by convention) outgoing edges, so that the composition of the ToDot() of all nodes
+  //    builds a complete dot graph (without prolog and epilog though).
+  virtual void ToDot(std::string& result) const;
+
+  // Computes Downward Exposed Definitions for the current node.
+  void ComputeDownExposedDefs();
+  const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
+
+  // Performs one iteration of the reaching definitions algorithm
+  // and returns true if the reaching definitions set changed.
+  bool UpdateReachingDefs();
+  // Returns the set of reaching definitions for the current region.
+  std::map<int, std::set<sea_ir::InstructionNode*>* >* GetReachingDefs();
+
+  void SetRPO(int rpo) {
+    rpo_ = rpo;
+  }
+
+  int GetRPO() {
+    return rpo_;
+  }
+
+  void SetIDominator(Region* dom) {
+    idom_ = dom;
+  }
+
+  Region* GetIDominator() const {
+    return idom_;
+  }
+
+  void AddToIDominatedSet(Region* dominated) {
+    idominated_set_.insert(dominated);
+  }
+
+  const std::set<Region*>* GetIDominatedSet() {
+    return &idominated_set_;
+  }
+
+  // Adds @df_reg to the dominance frontier of the current region.
+  void AddToDominanceFrontier(Region* df_reg) {
+    df_.insert(df_reg);
+  }
+  // Returns the dominance frontier of the current region.
+  // Preconditions: SeaGraph.ComputeDominanceFrontier()
+  std::set<Region*>* GetDominanceFrontier() {
+    return &df_;
+  }
+  // Returns true if the region contains a phi function for @reg_no.
+  bool ContainsPhiFor(int reg_no) {
+    return (phi_set_.end() != phi_set_.find(reg_no));
+  }
+  // Returns the phi-functions from the region.
+  std::vector<PhiInstructionNode*>* GetPhiNodes() {
+    return &phi_instructions_;
+  }
+  // Adds a phi-function for @reg_no to this region.
+  // Note: The insertion order does not matter, as phi-functions
+  //       are conceptually executed at the same time.
+  bool InsertPhiFor(int reg_no);
+  // Sets the phi-function uses to be as defined in @scoped_table for predecessor @predecessor.
+  void SetPhiDefinitionsForUses(const utils::ScopedHashtable<int, InstructionNode*>* scoped_table,
+      Region* predecessor);
+
+ private:
+  std::vector<sea_ir::InstructionNode*> instructions_;
+  std::map<int, sea_ir::InstructionNode*> de_defs_;
+  std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs_;
+  int reaching_defs_size_;
+  int rpo_;
+  // Immediate dominator node.
+  Region* idom_;
+  // The set of nodes immediately dominated by the region.
+  std::set<Region*> idominated_set_;
+  // Records the dominance frontier.
+  std::set<Region*> df_;
+  // Records the set of register numbers that have phi nodes in this region.
+  std::set<int> phi_set_;
+  std::vector<PhiInstructionNode*> phi_instructions_;
+};
+
+class SeaGraph {
+ public:
+  static SeaGraph* GetCurrentGraph();
+
+  void CompileMethod(const art::DexFile::CodeItem* code_item,
+      uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file);
+  // Writes a .dot representation of the regions and their instruction children to @filename.
+  void DumpSea(std::string filename) const;
+  // Recursively computes the reverse postorder value for @crt_bb and successors.
+  static void ComputeRPO(Region* crt_bb, int& crt_rpo);
+  // Returns the "lowest common ancestor" of @i and @j in the dominator tree.
+  static Region* Intersect(Region* i, Region* j);
+
+ private:
+  // Registers @childReg as a region belonging to the SeaGraph instance.
+  void AddRegion(Region* childReg);
+  // Returns a new region and registers it with the SeaGraph instance.
+  Region* GetNewRegion();
+  // Adds a CFG edge from @src node to @dst node.
+  void AddEdge(Region* src, Region* dst) const;
+  // Builds the non-SSA sea-ir representation of the function @code_item from @dex_file.
+  void BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item, const art::DexFile& dex_file);
+  // Computes immediate dominators for each region.
+  // Precondition: BuildMethodSeaGraph()
+  void ComputeIDominators();
+  // Computes Downward Exposed Definitions for all regions in the graph.
+  void ComputeDownExposedDefs();
+  // Computes the reaching definitions set following the equations from
+  // Cooper & Torczon, "Engineering a Compiler", second edition, page 491.
+  // Precondition: ComputeDownExposedDefs()
+  void ComputeReachingDefs();
+  // Computes the reverse-postorder numbering for the region nodes.
+  // Precondition: BuildMethodSeaGraph()
+  void ComputeRPO();
+  // Computes the dominance frontier for all regions in the graph,
+  // following the algorithm from
+  // Cooper & Torczon, "Engineering a Compiler", second edition, page 499.
+  // Precondition: ComputeIDominators()
+  void ComputeDominanceFrontier();
+
+  void ConvertToSSA();
+  // Identifies the definitions corresponding to uses for region @node
+  // by using the scoped hashtable of names @ scoped_table.
+  void RenameAsSSA(Region* node, utils::ScopedHashtable<int, InstructionNode*>* scoped_table);
+  void RenameAsSSA();
+
+  static SeaGraph graph_;
+  std::vector<Region*> regions_;
+};
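+
+// Typical intended use (a sketch; the exact call sites live in the SEA_IR
+// frontend, outside this header):
+//   sea_ir::SeaGraph* graph = sea_ir::SeaGraph::GetCurrentGraph();
+//   graph->CompileMethod(code_item, class_def_idx, method_idx, dex_file);
+//   graph->DumpSea("/tmp/sea_ir.dot");  // Hypothetical output path.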
+} // end namespace sea_ir
+#endif // SEA_IR_H_
diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc
new file mode 100644
index 0000000..a7eea51
--- /dev/null
+++ b/compiler/stubs/portable/stubs.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stubs/stubs.h"
+
+#include "jni_internal.h"
+#include "oat/utils/arm/assembler_arm.h"
+#include "oat/utils/mips/assembler_mips.h"
+#include "oat/utils/x86/assembler_x86.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "stack_indirect_reference_table.h"
+#include "sirt_ref.h"
+
+#define __ assembler->
+
+namespace art {
+
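+// Each per-architecture generator below emits a small portable resolution
+// trampoline: it saves the incoming argument registers, calls
+// pPortableResolutionTrampolineFromCode to resolve the callee, and then either
+// tail-calls the resolved code or returns to the caller so that a pending
+// exception can be delivered.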
+namespace arm {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+  RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
+
+  __ PushList(save);
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+  __ mov(R3, ShifterOperand(TR));  // Pass Thread::Current() in R3
+  __ mov(R2, ShifterOperand(SP));  // Pass sp for Method** callee_addr
+  __ IncreaseFrameSize(12);         // 3 words of space for alignment
+  // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
+  __ blx(R12);
+  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
+  __ DecreaseFrameSize(12);
+  __ PopList(save);
+  __ cmp(R12, ShifterOperand(0));
+  __ bx(R12, NE);                   // If R12 != 0 tail call method's code
+  __ bx(LR);                        // Return to caller to handle exception
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.release();
+}
+} // namespace arm
+
+namespace mips {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+  // Build frame and save argument registers and RA.
+  __ AddConstant(SP, SP, -32);
+  __ StoreToOffset(kStoreWord, RA, SP, 28);
+  __ StoreToOffset(kStoreWord, A3, SP, 12);
+  __ StoreToOffset(kStoreWord, A2, SP, 8);
+  __ StoreToOffset(kStoreWord, A1, SP, 4);
+  __ StoreToOffset(kStoreWord, A0, SP, 0);
+
+  __ LoadFromOffset(kLoadWord, T9, S1,
+                    ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+  __ Move(A3, S1);  // Pass Thread::Current() in A3
+  __ Move(A2, SP);  // Pass SP for Method** callee_addr
+  __ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
+
+  // Restore frame, argument registers, and RA.
+  __ LoadFromOffset(kLoadWord, A0, SP, 0);
+  __ LoadFromOffset(kLoadWord, A1, SP, 4);
+  __ LoadFromOffset(kLoadWord, A2, SP, 8);
+  __ LoadFromOffset(kLoadWord, A3, SP, 12);
+  __ LoadFromOffset(kLoadWord, RA, SP, 28);
+  __ AddConstant(SP, SP, 32);
+
+  Label resolve_fail;
+  __ EmitBranch(V0, ZERO, &resolve_fail, true);
+  __ Jr(V0); // If V0 != 0 tail call method's code
+  __ Bind(&resolve_fail, false);
+  __ Jr(RA); // Return to caller to handle exception
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.release();
+}
+} // namespace mips
+
+namespace x86 {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  __ pushl(EBP);
+  __ movl(EBP, ESP);          // save ESP
+  __ subl(ESP, Immediate(8));  // Align stack
+  __ movl(EAX, Address(EBP, 8));  // Method* called
+  __ leal(EDX, Address(EBP, 8));  // Method** called_addr
+  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // pass thread
+  __ pushl(EDX);  // pass called_addr
+  __ pushl(ECX);  // pass receiver
+  __ pushl(EAX);  // pass called
+  // Call to resolve method.
+  __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
+          X86ManagedRegister::FromCpuRegister(ECX));
+  __ leave();
+
+  Label resolve_fail;  // forward declaration
+  __ cmpl(EAX, Immediate(0));
+  __ j(kEqual, &resolve_fail);
+  __ jmp(EAX);  // If EAX != 0 tail call method's code
+  __ Bind(&resolve_fail);
+  __ ret();  // Return to caller to handle exception
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.release();
+}
+} // namespace x86
+
+} // namespace art
diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc
new file mode 100644
index 0000000..790b5d6
--- /dev/null
+++ b/compiler/stubs/quick/stubs.cc
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stubs/stubs.h"
+
+#include "jni_internal.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/utils/arm/assembler_arm.h"
+#include "oat/utils/mips/assembler_mips.h"
+#include "oat/utils/x86/assembler_x86.h"
+#include "sirt_ref.h"
+#include "stack_indirect_reference_table.h"
+
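+// As in the portable stubs, `__` forwards to the architecture-specific assembler.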
+#define __ assembler->
+
+namespace art {
+
+namespace arm {
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+  // | Out args |
+  // | Method*  | <- SP on entry
+  // | LR       |    return address into caller
+  // | ...      |    callee saves
+  // | R3       |    possible argument
+  // | R2       |    possible argument
+  // | R1       |    possible argument
+  // | R0       |    junk on call to QuickResolutionTrampolineFromCode, holds result Method*
+  // | Method*  |    Callee save Method* set up by QuickResolutionTrampolineFromCode
+  // Save callee saves and ready frame for exception delivery
+  RegList save = (1 << R1) | (1 << R2) | (1 << R3) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) |
+                 (1 << R10) | (1 << R11) | (1 << LR);
+  // TODO: enable when GetCalleeSaveMethod is available at stub generation time
+  // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
+  __ PushList(save);
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+  __ mov(R3, ShifterOperand(TR));  // Pass Thread::Current() in R3
+  __ IncreaseFrameSize(8);         // 2 words of space for alignment
+  __ mov(R2, ShifterOperand(SP));  // Pass SP
+  // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
+  __ blx(R12);
+  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
+  // Restore registers which may have been modified by GC, "R0" will hold the Method*
+  __ DecreaseFrameSize(4);
+  __ PopList((1 << R0) | save);
+  __ bx(R12);  // Leaf call to method's code
+  __ bkpt(0);
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+
+  // Loading the entry point directly into PC performs the jump.
+  __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+  __ bkpt(0);  // Unreachable
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+
+  __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+  __ bkpt(0);
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+} // namespace arm
+
+namespace mips {
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+  // | Out args   |
+  // | Method*    | <- SP on entry
+  // | RA         |    return address into caller
+  // | ...        |    callee saves
+  // | A3         |    possible argument
+  // | A2         |    possible argument
+  // | A1         |    possible argument
+  // | A0/Method* |    Callee save Method* set up by QuickResolutionTrampolineFromCode
+  // Save callee saves and ready frame for exception delivery
+  __ AddConstant(SP, SP, -64);
+  __ StoreToOffset(kStoreWord, RA, SP, 60);
+  __ StoreToOffset(kStoreWord, FP, SP, 56);
+  __ StoreToOffset(kStoreWord, GP, SP, 52);
+  __ StoreToOffset(kStoreWord, S7, SP, 48);
+  __ StoreToOffset(kStoreWord, S6, SP, 44);
+  __ StoreToOffset(kStoreWord, S5, SP, 40);
+  __ StoreToOffset(kStoreWord, S4, SP, 36);
+  __ StoreToOffset(kStoreWord, S3, SP, 32);
+  __ StoreToOffset(kStoreWord, S2, SP, 28);
+  __ StoreToOffset(kStoreWord, A3, SP, 12);
+  __ StoreToOffset(kStoreWord, A2, SP, 8);
+  __ StoreToOffset(kStoreWord, A1, SP, 4);
+
+  __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+  __ Move(A3, S1);  // Pass Thread::Current() in A3
+  __ Move(A2, SP);  // Pass SP
+  __ Jalr(T9);  // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
+
+  // Restore registers which may have been modified by GC
+  __ LoadFromOffset(kLoadWord, A0, SP, 0);
+  __ LoadFromOffset(kLoadWord, A1, SP, 4);
+  __ LoadFromOffset(kLoadWord, A2, SP, 8);
+  __ LoadFromOffset(kLoadWord, A3, SP, 12);
+  __ LoadFromOffset(kLoadWord, S2, SP, 28);
+  __ LoadFromOffset(kLoadWord, S3, SP, 32);
+  __ LoadFromOffset(kLoadWord, S4, SP, 36);
+  __ LoadFromOffset(kLoadWord, S5, SP, 40);
+  __ LoadFromOffset(kLoadWord, S6, SP, 44);
+  __ LoadFromOffset(kLoadWord, S7, SP, 48);
+  __ LoadFromOffset(kLoadWord, GP, SP, 52);
+  __ LoadFromOffset(kLoadWord, FP, SP, 56);
+  __ LoadFromOffset(kLoadWord, RA, SP, 60);
+  __ AddConstant(SP, SP, 64);
+
+  __ Move(T9, V0); // Put method's code in T9
+  __ Jr(T9);  // Leaf call to method's code
+
+  __ Break();
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+
+  __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+  __ Jr(T9);
+  __ Break();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+
+  __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+  __ Jr(T9);
+  __ Break();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+} // namespace mips
+
+namespace x86 {
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+  // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+  // The return address pushed by the call sits above the registers saved below.
+  __ pushl(EDI);
+  __ pushl(ESI);
+  __ pushl(EBP);
+  __ pushl(EBX);
+  __ pushl(EDX);
+  __ pushl(ECX);
+  __ pushl(EAX);  // <-- callee save Method* to go here
+  __ movl(EDX, ESP);          // save ESP
+  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // pass Thread*
+  __ pushl(EDX);              // pass ESP for Method*
+  __ pushl(ECX);              // pass receiver
+  __ pushl(EAX);              // pass Method*
+
+  // Call to resolve method.
+  __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
+          X86ManagedRegister::FromCpuRegister(ECX));
+
+  __ movl(EDI, EAX);  // save code pointer in EDI
+  __ addl(ESP, Immediate(16));  // Pop arguments
+  __ popl(EAX);  // Restore args.
+  __ popl(ECX);
+  __ popl(EDX);
+  __ popl(EBX);
+  __ popl(EBP);  // Restore callee saves.
+  __ popl(ESI);
+  // Swap EDI callee save with code pointer
+  __ xchgl(EDI, Address(ESP, 0));
+  // Tail call to intended method.
+  __ ret();
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+} // namespace x86
+
+} // namespace art
diff --git a/compiler/stubs/stubs.h b/compiler/stubs/stubs.h
new file mode 100644
index 0000000..ebe761d
--- /dev/null
+++ b/compiler/stubs/stubs.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_STUBS_STUBS_H_
+#define ART_SRC_COMPILER_STUBS_STUBS_H_
+
+#include "runtime.h"
+
+namespace art {
+
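+// Each target architecture provides the same four stub generators.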
+namespace arm {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+}  // namespace arm
+
+namespace mips {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+}  // namespace mips
+
+namespace x86 {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+}  // namespace x86
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_STUBS_STUBS_H_