Merge "X86: Add support for ucomis[sd] reg/memory form"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index fdfd94c..ff41736 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -29,6 +29,7 @@
   GetMethodSignature \
   Instrumentation \
   Interfaces \
+  Lookup \
   Main \
   MultiDex \
   MultiDexModifiedSecondary \
@@ -78,6 +79,7 @@
 ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
 ART_GTEST_stub_test_DEX_DEPS := AllFields
 ART_GTEST_transaction_test_DEX_DEPS := Transaction
+ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
 
 # The elf writer test has dependencies on core.oat.
 ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
@@ -220,6 +222,7 @@
   runtime/reference_table_test.cc \
   runtime/thread_pool_test.cc \
   runtime/transaction_test.cc \
+  runtime/type_lookup_table_test.cc \
   runtime/utf_test.cc \
   runtime/utils_test.cc \
   runtime/verifier/method_verifier_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index aaac126..e74a68f 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -155,6 +155,7 @@
 	dex/quick/mips/utility_mips.cc \
 	jni/quick/mips/calling_convention_mips.cc \
 	optimizing/code_generator_mips.cc \
+	optimizing/intrinsics_mips.cc \
 	utils/mips/assembler_mips.cc \
 	utils/mips/managed_register_mips.cc \
 
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index e1a2838..eaf2408 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -756,14 +756,7 @@
     return *class_index;
   }
 
-  const DexFile::StringId* string_id = dex_file->FindStringId(kClassCacheNames[index]);
-  if (string_id == nullptr) {
-    *class_index = kIndexNotFound;
-    return *class_index;
-  }
-  uint32_t string_index = dex_file->GetIndexForStringId(*string_id);
-
-  const DexFile::TypeId* type_id = dex_file->FindTypeId(string_index);
+  const DexFile::TypeId* type_id = dex_file->FindTypeId(kClassCacheNames[index]);
   if (type_id == nullptr) {
     *class_index = kIndexNotFound;
     return *class_index;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 18c2e55..24daf2f 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -67,7 +67,6 @@
       false,
       false,
       nullptr,
-      new PassManagerOptions(),
       nullptr,
       false);
     VerificationResults verification_results(&compiler_options);
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index d9571c5..e977ebf 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -50,7 +50,6 @@
         false,
         false,
         nullptr,
-        new PassManagerOptions(),
         nullptr,
         false));
     verification_results_.reset(new VerificationResults(compiler_options_.get()));
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 1a7dbe3..14ba81d 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -187,15 +187,11 @@
         // Search dex file for localized ssb index, may fail if member's class is a parent
         // of the class mentioned in the dex file and there is no dex cache entry.
         std::string temp;
-        const DexFile::StringId* string_id =
-            dex_file->FindStringId(resolved_member->GetDeclaringClass()->GetDescriptor(&temp));
-        if (string_id != nullptr) {
-          const DexFile::TypeId* type_id =
-             dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
-          if (type_id != nullptr) {
-            // medium path, needs check of static storage base being initialized
-            storage_idx = dex_file->GetIndexForTypeId(*type_id);
-          }
+        const DexFile::TypeId* type_id =
+           dex_file->FindTypeId(resolved_member->GetDeclaringClass()->GetDescriptor(&temp));
+        if (type_id != nullptr) {
+          // medium path, needs check of static storage base being initialized
+          storage_idx = dex_file->GetIndexForTypeId(*type_id);
         }
       }
       if (storage_idx != DexFile::kDexNoIndex) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8750aa8..fb116bb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -375,6 +375,7 @@
       timings_logger_(timer),
       compiler_context_(nullptr),
       support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
+      dex_files_for_oat_file_(nullptr),
       compiled_method_storage_(swap_fd) {
   DCHECK(compiler_options_ != nullptr);
   DCHECK(verification_results_ != nullptr);
@@ -1371,8 +1372,7 @@
 }
 
 DexCacheArraysLayout CompilerDriver::GetDexCacheArraysLayout(const DexFile* dex_file) {
-  // Currently only image dex caches have fixed array layout.
-  return IsImage() && GetSupportBootImageFixup()
+  return ContainsElement(GetDexFilesForOatFile(), dex_file)
       ? DexCacheArraysLayout(GetInstructionSetPointerSize(instruction_set_), dex_file)
       : DexCacheArraysLayout();
 }
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 485cdcf..4ed4dc6 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -39,6 +39,7 @@
 #include "runtime.h"
 #include "safe_map.h"
 #include "thread_pool.h"
+#include "utils/array_ref.h"
 #include "utils/dex_cache_arrays_layout.h"
 
 namespace art {
@@ -101,7 +102,20 @@
 
   ~CompilerDriver();
 
-  void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+  // Set dex files that will be stored in the oat file after being compiled.
+  void SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
+    dex_files_for_oat_file_ = &dex_files;
+  }
+
+  // Get the dex files that will be stored in the oat file after being compiled.
+  ArrayRef<const DexFile* const> GetDexFilesForOatFile() const {
+    return (dex_files_for_oat_file_ != nullptr)
+        ? ArrayRef<const DexFile* const>(*dex_files_for_oat_file_)
+        : ArrayRef<const DexFile* const>();
+  }
+
+  void CompileAll(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
       REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
 
@@ -661,6 +675,9 @@
 
   bool support_boot_image_fixup_;
 
+  // List of dex files that will be stored in the oat file.
+  const std::vector<const DexFile*>* dex_files_for_oat_file_;
+
   CompiledMethodStorage compiled_method_storage_;
 
   friend class CompileClassVisitor;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 3f5a1ea..a24c8a3 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -16,6 +16,8 @@
 
 #include "compiler_options.h"
 
+#include <fstream>
+
 #include "dex/pass_manager.h"
 
 namespace art {
@@ -27,8 +29,8 @@
       small_method_threshold_(kDefaultSmallMethodThreshold),
       tiny_method_threshold_(kDefaultTinyMethodThreshold),
       num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
-      inline_depth_limit_(kDefaultInlineDepthLimit),
-      inline_max_code_units_(kDefaultInlineMaxCodeUnits),
+      inline_depth_limit_(kUnsetInlineDepthLimit),
+      inline_max_code_units_(kUnsetInlineMaxCodeUnits),
       include_patch_information_(kDefaultIncludePatchInformation),
       top_k_profile_threshold_(kDefaultTopKProfileThreshold),
       debuggable_(false),
@@ -38,7 +40,7 @@
       implicit_suspend_checks_(false),
       compile_pic_(false),
       verbose_methods_(nullptr),
-      pass_manager_options_(new PassManagerOptions),
+      pass_manager_options_(),
       abort_on_hard_verifier_failure_(false),
       init_failure_output_(nullptr) {
 }
@@ -65,7 +67,6 @@
                                  bool implicit_suspend_checks,
                                  bool compile_pic,
                                  const std::vector<std::string>* verbose_methods,
-                                 PassManagerOptions* pass_manager_options,
                                  std::ostream* init_failure_output,
                                  bool abort_on_hard_verifier_failure
                                  ) :  // NOLINT(whitespace/parens)
@@ -86,9 +87,155 @@
     implicit_suspend_checks_(implicit_suspend_checks),
     compile_pic_(compile_pic),
     verbose_methods_(verbose_methods),
-    pass_manager_options_(pass_manager_options),
+    pass_manager_options_(),
     abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
     init_failure_output_(init_failure_output) {
 }
 
+void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage);
+}
+
+void CompilerOptions::ParseLargeMethodMax(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--large-method-max", &large_method_threshold_, Usage);
+}
+
+void CompilerOptions::ParseSmallMethodMax(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--small-method-max", &small_method_threshold_, Usage);
+}
+
+void CompilerOptions::ParseTinyMethodMax(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--tiny-method-max", &tiny_method_threshold_, Usage);
+}
+
+void CompilerOptions::ParseNumDexMethods(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage);
+}
+
+void CompilerOptions::ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--inline-depth-limit", &inline_depth_limit_, Usage);
+}
+
+void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--inline-max-code-units=", &inline_max_code_units_, Usage);
+}
+
+void CompilerOptions::ParseDisablePasses(const StringPiece& option,
+                                         UsageFn Usage ATTRIBUTE_UNUSED) {
+  DCHECK(option.starts_with("--disable-passes="));
+  const std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
+  pass_manager_options_.SetDisablePassList(disable_passes);
+}
+
+void CompilerOptions::ParsePrintPasses(const StringPiece& option,
+                                       UsageFn Usage ATTRIBUTE_UNUSED) {
+  DCHECK(option.starts_with("--print-passes="));
+  const std::string print_passes = option.substr(strlen("--print-passes=")).data();
+  pass_manager_options_.SetPrintPassList(print_passes);
+}
+
+void CompilerOptions::ParseDumpCfgPasses(const StringPiece& option,
+                                         UsageFn Usage ATTRIBUTE_UNUSED) {
+  DCHECK(option.starts_with("--dump-cfg-passes="));
+  const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
+  pass_manager_options_.SetDumpPassList(dump_passes_string);
+}
+
+void CompilerOptions::ParsePassOptions(const StringPiece& option,
+                                       UsageFn Usage ATTRIBUTE_UNUSED) {
+  DCHECK(option.starts_with("--pass-options="));
+  const std::string pass_options = option.substr(strlen("--pass-options=")).data();
+  pass_manager_options_.SetOverriddenPassOptions(pass_options);
+}
+
+void CompilerOptions::ParseDumpInitFailures(const StringPiece& option,
+                                            UsageFn Usage ATTRIBUTE_UNUSED) {
+  DCHECK(option.starts_with("--dump-init-failures="));
+  std::string file_name = option.substr(strlen("--dump-init-failures=")).data();
+  init_failure_output_.reset(new std::ofstream(file_name));
+  if (init_failure_output_.get() == nullptr) {
+    LOG(ERROR) << "Failed to allocate ofstream";
+  } else if (init_failure_output_->fail()) {
+    LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization "
+               << "failures.";
+    init_failure_output_.reset();
+  }
+}
+
+bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) {
+  if (option.starts_with("--compiler-filter=")) {
+    const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
+    if (strcmp(compiler_filter_string, "verify-none") == 0) {
+      compiler_filter_ = CompilerOptions::kVerifyNone;
+    } else if (strcmp(compiler_filter_string, "interpret-only") == 0) {
+      compiler_filter_ = CompilerOptions::kInterpretOnly;
+    } else if (strcmp(compiler_filter_string, "verify-at-runtime") == 0) {
+      compiler_filter_ = CompilerOptions::kVerifyAtRuntime;
+    } else if (strcmp(compiler_filter_string, "space") == 0) {
+      compiler_filter_ = CompilerOptions::kSpace;
+    } else if (strcmp(compiler_filter_string, "balanced") == 0) {
+      compiler_filter_ = CompilerOptions::kBalanced;
+    } else if (strcmp(compiler_filter_string, "speed") == 0) {
+      compiler_filter_ = CompilerOptions::kSpeed;
+    } else if (strcmp(compiler_filter_string, "everything") == 0) {
+      compiler_filter_ = CompilerOptions::kEverything;
+    } else if (strcmp(compiler_filter_string, "time") == 0) {
+      compiler_filter_ = CompilerOptions::kTime;
+    } else {
+      Usage("Unknown --compiler-filter value %s", compiler_filter_string);
+    }
+  } else if (option == "--compile-pic") {
+    compile_pic_ = true;
+  } else if (option.starts_with("--huge-method-max=")) {
+    ParseHugeMethodMax(option, Usage);
+  } else if (option.starts_with("--large-method-max=")) {
+    ParseLargeMethodMax(option, Usage);
+  } else if (option.starts_with("--small-method-max=")) {
+    ParseSmallMethodMax(option, Usage);
+  } else if (option.starts_with("--tiny-method-max=")) {
+    ParseTinyMethodMax(option, Usage);
+  } else if (option.starts_with("--num-dex-methods=")) {
+    ParseNumDexMethods(option, Usage);
+  } else if (option.starts_with("--inline-depth-limit=")) {
+    ParseInlineDepthLimit(option, Usage);
+  } else if (option.starts_with("--inline-max-code-units=")) {
+    ParseInlineMaxCodeUnits(option, Usage);
+  } else if (option == "--generate-debug-info" || option == "-g") {
+    generate_debug_info_ = true;
+  } else if (option == "--no-generate-debug-info") {
+    generate_debug_info_ = false;
+  } else if (option == "--debuggable") {
+    debuggable_ = true;
+    generate_debug_info_ = true;
+  } else if (option.starts_with("--top-k-profile-threshold=")) {
+    ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
+  } else if (option == "--include-patch-information") {
+    include_patch_information_ = true;
+  } else if (option == "--no-include-patch-information") {
+    include_patch_information_ = false;
+  } else if (option == "--abort-on-hard-verifier-error") {
+    abort_on_hard_verifier_failure_ = true;
+  } else if (option == "--print-pass-names") {
+    pass_manager_options_.SetPrintPassNames(true);
+  } else if (option.starts_with("--disable-passes=")) {
+    ParseDisablePasses(option, Usage);
+  } else if (option.starts_with("--print-passes=")) {
+    ParsePrintPasses(option, Usage);
+  } else if (option == "--print-all-passes") {
+    pass_manager_options_.SetPrintAllPasses();
+  } else if (option.starts_with("--dump-cfg-passes=")) {
+    ParseDumpCfgPasses(option, Usage);
+  } else if (option == "--print-pass-options") {
+    pass_manager_options_.SetPrintPassOptions(true);
+  } else if (option.starts_with("--pass-options=")) {
+    ParsePassOptions(option, Usage);
+  } else if (option.starts_with("--dump-init-failures=")) {
+    ParseDumpInitFailures(option, Usage);
+  } else {
+    // Option not recognized.
+    return false;
+  }
+  return true;
+}
+
 }  // namespace art
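
Note: ParseCompilerOption() above returns false for options it does not recognize, so a caller can forward arbitrary option strings and handle the leftovers itself. A minimal usage sketch, mirroring the jit_compiler.cc hunk further below (the surrounding loop and the Usage callback belong to the caller and are assumed here, not part of CompilerOptions):

    // Sketch: forward runtime-supplied compiler options to CompilerOptions.
    // Options ParseCompilerOption() does not recognize are simply logged here.
    for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
      if (!compiler_options_->ParseCompilerOption(argument, Usage)) {
        VLOG(compiler) << "Ignoring unrecognized compiler option " << argument;
      }
    }
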
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 18f215d..e6acab4 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -22,12 +22,12 @@
 #include <vector>
 
 #include "base/macros.h"
+#include "dex/pass_manager.h"
 #include "globals.h"
+#include "utils.h"
 
 namespace art {
 
-class PassManagerOptions;
-
 class CompilerOptions FINAL {
  public:
   enum CompilerFilter {
@@ -53,6 +53,8 @@
   static const bool kDefaultIncludePatchInformation = false;
   static const size_t kDefaultInlineDepthLimit = 3;
   static const size_t kDefaultInlineMaxCodeUnits = 20;
+  static constexpr size_t kUnsetInlineDepthLimit = -1;
+  static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
 
   // Default inlining settings when the space filter is used.
   static constexpr size_t kSpaceFilterInlineDepthLimit = 3;
@@ -78,7 +80,6 @@
                   bool implicit_suspend_checks,
                   bool compile_pic,
                   const std::vector<std::string>* verbose_methods,
-                  PassManagerOptions* pass_manager_options,
                   std::ostream* init_failure_output,
                   bool abort_on_hard_verifier_failure);
 
@@ -200,47 +201,64 @@
   }
 
   std::ostream* GetInitFailureOutput() const {
-    return init_failure_output_;
+    return init_failure_output_.get();
   }
 
   const PassManagerOptions* GetPassManagerOptions() const {
-    return pass_manager_options_.get();
+    return &pass_manager_options_;
   }
 
   bool AbortOnHardVerifierFailure() const {
     return abort_on_hard_verifier_failure_;
   }
 
+  bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
+
  private:
+  void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
+  void ParsePassOptions(const StringPiece& option, UsageFn Usage);
+  void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
+  void ParsePrintPasses(const StringPiece& option, UsageFn Usage);
+  void ParseDisablePasses(const StringPiece& option, UsageFn Usage);
+  void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
+  void ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage);
+  void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
+  void ParseTinyMethodMax(const StringPiece& option, UsageFn Usage);
+  void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
+  void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage);
+  void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage);
+
   CompilerFilter compiler_filter_;
-  const size_t huge_method_threshold_;
-  const size_t large_method_threshold_;
-  const size_t small_method_threshold_;
-  const size_t tiny_method_threshold_;
-  const size_t num_dex_methods_threshold_;
-  const size_t inline_depth_limit_;
-  const size_t inline_max_code_units_;
-  const bool include_patch_information_;
+  size_t huge_method_threshold_;
+  size_t large_method_threshold_;
+  size_t small_method_threshold_;
+  size_t tiny_method_threshold_;
+  size_t num_dex_methods_threshold_;
+  size_t inline_depth_limit_;
+  size_t inline_max_code_units_;
+  bool include_patch_information_;
   // When using a profile file only the top K% of the profiled samples will be compiled.
-  const double top_k_profile_threshold_;
-  const bool debuggable_;
-  const bool generate_debug_info_;
-  const bool implicit_null_checks_;
-  const bool implicit_so_checks_;
-  const bool implicit_suspend_checks_;
-  const bool compile_pic_;
+  double top_k_profile_threshold_;
+  bool debuggable_;
+  bool generate_debug_info_;
+  bool implicit_null_checks_;
+  bool implicit_so_checks_;
+  bool implicit_suspend_checks_;
+  bool compile_pic_;
 
   // Vector of methods to have verbose output enabled for.
-  const std::vector<std::string>* const verbose_methods_;
+  const std::vector<std::string>* verbose_methods_;
 
-  std::unique_ptr<PassManagerOptions> pass_manager_options_;
+  PassManagerOptions pass_manager_options_;
 
   // Abort compilation with an error if we find a class that fails verification with a hard
   // failure.
-  const bool abort_on_hard_verifier_failure_;
+  bool abort_on_hard_verifier_failure_;
 
   // Log initialization of initialization failures to this stream if not null.
-  std::ostream* const init_failure_output_;
+  std::unique_ptr<std::ostream> init_failure_output_;
+
+  friend class Dex2Oat;
 
   DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
 };
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 7e31a7a..21d582e 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -76,6 +76,7 @@
       for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
         dex_file->EnableWrite();
       }
+      compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
       compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
 
       t.NewTiming("WriteElf");
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b563c80..c1b87c9 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -63,9 +63,18 @@
   return jit_compiler->CompileMethod(self, method);
 }
 
+// Callers of this method assume it has NO_RETURN.
+NO_RETURN static void Usage(const char* fmt, ...) {
+  va_list ap;
+  va_start(ap, fmt);
+  std::string error;
+  StringAppendV(&error, fmt, ap);
+  LOG(FATAL) << error;
+  va_end(ap);
+  exit(EXIT_FAILURE);
+}
+
 JitCompiler::JitCompiler() : total_time_(0) {
-  auto* pass_manager_options = new PassManagerOptions;
-  pass_manager_options->SetDisablePassList("GVN,DCE,GVNCleanup");
   compiler_options_.reset(new CompilerOptions(
       CompilerOptions::kDefaultCompilerFilter,
       CompilerOptions::kDefaultHugeMethodThreshold,
@@ -84,9 +93,11 @@
       /* implicit_suspend_checks */ false,
       /* pic */ true,  // TODO: Support non-PIC in optimizing.
       /* verbose_methods */ nullptr,
-      pass_manager_options,
       /* init_failure_output */ nullptr,
       /* abort_on_hard_verifier_failure */ false));
+  for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
+    compiler_options_->ParseCompilerOption(argument, Usage);
+  }
   const InstructionSet instruction_set = kRuntimeISA;
   for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
     VLOG(compiler) << "JIT compiler option " << option;
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index ac38f3d..13754fd 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -36,7 +36,8 @@
   // of code. To avoid any alignment discrepancies for the final chunk, we always align the
   // offset after reserving or writing any chunk.
   uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
-  bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset, MethodReference(nullptr, 0u),
+  bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset,
+                                                MethodReference(nullptr, 0u),
                                                 aligned_offset);
   if (needs_thunk) {
     thunk_locations_.push_back(aligned_offset);
@@ -94,7 +95,8 @@
   // We need the MethodReference for that.
   if (!unprocessed_patches_.empty() &&
       next_aligned_offset - unprocessed_patches_.front().second > max_positive_displacement_) {
-    bool needs_thunk = ReserveSpaceProcessPatches(quick_code_offset, method_ref,
+    bool needs_thunk = ReserveSpaceProcessPatches(quick_code_offset,
+                                                  method_ref,
                                                   next_aligned_offset);
     if (needs_thunk) {
       // A single thunk will cover all pending patches.
@@ -156,7 +158,10 @@
         // If still unresolved, check if we have a thunk within range.
         if (thunk_locations_.empty() ||
             patch_offset - thunk_locations_.back() > max_negative_displacement_) {
-          return next_aligned_offset - patch_offset > max_positive_displacement_;
+          // No thunk in range; we need a thunk if the next aligned offset
+          // is out of range, or if we're at the end of all code.
+          return (next_aligned_offset - patch_offset > max_positive_displacement_) ||
+              (quick_code_offset == next_aligned_offset);  // End of code.
         }
       } else {
         uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
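
Note: the end-of-code condition added above can be read as a small standalone predicate. A simplified sketch, assuming a flat parameter list for illustration (the real code works on member state and the pending-patch queue):

    // Sketch: does an unresolved call patch force reserving a trampoline thunk?
    bool NeedsThunk(uint32_t patch_offset,
                    uint32_t next_aligned_offset,
                    uint32_t quick_code_offset,
                    uint32_t last_thunk_offset,        // 0u if no thunk emitted yet.
                    uint32_t max_positive_displacement,
                    uint32_t max_negative_displacement) {
      // A previously emitted thunk that is still within backward range can be reused.
      if (last_thunk_offset != 0u &&
          patch_offset - last_thunk_offset <= max_negative_displacement) {
        return false;
      }
      // Otherwise a thunk is needed if the next aligned offset is already out of
      // forward range, or if we are at the end of all code (nothing follows).
      return (next_aligned_offset - patch_offset > max_positive_displacement) ||
             (quick_code_offset == next_aligned_offset);
    }
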
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index 5515313..a259cda 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -233,6 +233,36 @@
   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
 }
 
+TEST_F(Thumb2RelativePatcherTest, CallTrampolineTooFar) {
+  constexpr uint32_t missing_method_index = 1024u;
+  auto method3_raw_code = GenNopsAndBl(3u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_method3 = 3u * 2u;  // After NOPs.
+  ArrayRef<const uint8_t> method3_code(method3_raw_code);
+  ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size());
+  LinkerPatch method3_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, missing_method_index),
+  };
+
+  constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */;
+  bool thunk_in_gap = Create2MethodsWithGap(kNopCode,
+                                            ArrayRef<const LinkerPatch>(),
+                                            method3_code,
+                                            ArrayRef<const LinkerPatch>(method3_patches),
+                                            just_over_max_negative_disp - bl_offset_in_method3);
+  ASSERT_FALSE(thunk_in_gap);  // There should be a thunk, but it should be after method3.
+  ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first);
+
+  // Check linked code.
+  uint32_t method3_offset = GetMethodOffset(3u);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
+  ASSERT_EQ(diff & 1u, 0u);
+  ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
+  auto expected_code = GenNopsAndBl(3u, kBlPlus0 | ((diff >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef<const uint8_t>(expected_code)));
+  EXPECT_TRUE(CheckThunk(thunk_offset));
+}
+
 TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarAfter) {
   auto method1_raw_code = GenNopsAndBl(3u, kBlPlus0);
   constexpr uint32_t bl_offset_in_method1 = 3u * 2u;  // After NOPs.
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 2a426b5..0bfef5e 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -386,6 +386,39 @@
   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
 }
 
+TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) {
+  constexpr uint32_t missing_method_index = 1024u;
+  auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
+  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
+  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
+  LinkerPatch last_method_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, missing_method_index),
+  };
+
+  constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
+  uint32_t last_method_idx = Create2MethodsWithGap(
+      kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
+      ArrayRef<const LinkerPatch>(last_method_patches),
+      just_over_max_negative_disp - bl_offset_in_last_method);
+  uint32_t method1_offset = GetMethodOffset(1u);
+  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
+  ASSERT_EQ(method1_offset,
+            last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);
+  ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first);
+
+  // Check linked code.
+  uint32_t thunk_offset =
+      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+  uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
+  ASSERT_EQ(diff & 3u, 0u);
+  ASSERT_LT(diff, 128 * MB);
+  auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
+                                ArrayRef<const uint8_t>(expected_code)));
+  EXPECT_TRUE(CheckThunk(thunk_offset));
+}
+
 TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) {
   auto method1_raw_code = GenNopsAndBl(1u, kBlPlus0);
   constexpr uint32_t bl_offset_in_method1 = 1u * 4u;  // After NOPs.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 06576cc..ea3cb66 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -98,6 +98,7 @@
   jobject class_loader = nullptr;
   if (kCompile) {
     TimingLogger timings2("OatTest::WriteRead", false, false);
+    compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
     compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
   }
 
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index dcb23bf..c7b8884 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -33,6 +33,7 @@
 #include "driver/compiler_options.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space.h"
+#include "handle_scope-inl.h"
 #include "image_writer.h"
 #include "linker/relative_patcher.h"
 #include "mirror/array.h"
@@ -44,7 +45,7 @@
 #include "output_stream.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change.h"
-#include "handle_scope-inl.h"
+#include "type_lookup_table.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "verifier/method_verifier.h"
 
@@ -107,6 +108,9 @@
     size_oat_class_status_(0),
     size_oat_class_method_bitmaps_(0),
     size_oat_class_method_offsets_(0),
+    size_oat_lookup_table_alignment_(0),
+    size_oat_lookup_table_offset_(0),
+    size_oat_lookup_table_(0),
     method_offset_map_() {
   CHECK(key_value_store != nullptr);
 
@@ -129,6 +133,10 @@
     offset = InitDexFiles(offset);
   }
   {
+    TimingLogger::ScopedTiming split("InitLookupTables", timings);
+    offset = InitLookupTables(offset);
+  }
+  {
     TimingLogger::ScopedTiming split("InitOatClasses", timings);
     offset = InitOatClasses(offset);
   }
@@ -322,7 +330,8 @@
     return true;
   }
 
-  bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it) {
+  bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
+                   const ClassDataItemIterator& it) {
     // Fill in the compiled_methods_ array for methods that have a
     // CompiledMethod. We track the number of non-null entries in
     // num_non_null_compiled_methods_ since we only want to allocate
@@ -1043,11 +1052,29 @@
     oat_dex_files_[i]->dex_file_offset_ = offset;
 
     const DexFile* dex_file = (*dex_files_)[i];
+
+    // Initialize type lookup table
+    oat_dex_files_[i]->lookup_table_ = dex_file->GetTypeLookupTable();
+
     offset += dex_file->GetHeader().file_size_;
   }
   return offset;
 }
 
+size_t OatWriter::InitLookupTables(size_t offset) {
+  for (OatDexFile* oat_dex_file : oat_dex_files_) {
+    if (oat_dex_file->lookup_table_ != nullptr) {
+      uint32_t aligned_offset = RoundUp(offset, 4);
+      oat_dex_file->lookup_table_offset_ = aligned_offset;
+      size_oat_lookup_table_alignment_ += aligned_offset - offset;
+      offset = aligned_offset + oat_dex_file->lookup_table_->RawDataLength();
+    } else {
+      oat_dex_file->lookup_table_offset_ = 0;
+    }
+  }
+  return offset;
+}
+
 size_t OatWriter::InitOatClasses(size_t offset) {
   // calculate the offsets within OatDexFiles to OatClasses
   InitOatClassesMethodVisitor visitor(this, offset);
@@ -1256,6 +1283,9 @@
     DO_STAT(size_oat_class_status_);
     DO_STAT(size_oat_class_method_bitmaps_);
     DO_STAT(size_oat_class_method_offsets_);
+    DO_STAT(size_oat_lookup_table_alignment_);
+    DO_STAT(size_oat_lookup_table_offset_);
+    DO_STAT(size_oat_lookup_table_);
     #undef DO_STAT
 
     VLOG(compiler) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)"; \
@@ -1309,6 +1339,9 @@
     }
     size_dex_file_ += dex_file->GetHeader().file_size_;
   }
+  if (!WriteLookupTables(out, file_offset)) {
+    return false;
+  }
   for (size_t i = 0; i != oat_classes_.size(); ++i) {
     if (!oat_classes_[i]->Write(this, out, file_offset)) {
       PLOG(ERROR) << "Failed to write oat methods information to " << out->GetLocation();
@@ -1318,6 +1351,35 @@
   return true;
 }
 
+bool OatWriter::WriteLookupTables(OutputStream* out, const size_t file_offset) {
+  for (size_t i = 0; i < oat_dex_files_.size(); ++i) {
+    const uint32_t lookup_table_offset = oat_dex_files_[i]->lookup_table_offset_;
+    const TypeLookupTable* table = oat_dex_files_[i]->lookup_table_;
+    DCHECK_EQ(lookup_table_offset == 0, table == nullptr);
+    if (lookup_table_offset == 0) {
+      continue;
+    }
+    const uint32_t expected_offset = file_offset + lookup_table_offset;
+    off_t actual_offset = out->Seek(expected_offset, kSeekSet);
+    if (static_cast<uint32_t>(actual_offset) != expected_offset) {
+      const DexFile* dex_file = (*dex_files_)[i];
+      PLOG(ERROR) << "Failed to seek to lookup table section. Actual: " << actual_offset
+                  << " Expected: " << expected_offset << " File: " << dex_file->GetLocation();
+      return false;
+    }
+    if (table != nullptr) {
+      if (!out->WriteFully(table->RawData(), table->RawDataLength())) {
+        const DexFile* dex_file = (*dex_files_)[i];
+        PLOG(ERROR) << "Failed to write lookup table for " << dex_file->GetLocation()
+                    << " to " << out->GetLocation();
+        return false;
+      }
+      size_oat_lookup_table_ += table->RawDataLength();
+    }
+  }
+  return true;
+}
+
 size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
   #define VISIT(VisitorType)                                              \
     do {                                                                  \
@@ -1425,6 +1487,7 @@
   dex_file_location_data_ = reinterpret_cast<const uint8_t*>(location.data());
   dex_file_location_checksum_ = dex_file.GetLocationChecksum();
   dex_file_offset_ = 0;
+  lookup_table_offset_ = 0;
   methods_offsets_.resize(dex_file.NumClassDefs());
 }
 
@@ -1433,6 +1496,7 @@
           + dex_file_location_size_
           + sizeof(dex_file_location_checksum_)
           + sizeof(dex_file_offset_)
+          + sizeof(lookup_table_offset_)
           + (sizeof(methods_offsets_[0]) * methods_offsets_.size());
 }
 
@@ -1441,6 +1505,10 @@
   oat_header->UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
   oat_header->UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
   oat_header->UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
+  oat_header->UpdateChecksum(&lookup_table_offset_, sizeof(lookup_table_offset_));
+  if (lookup_table_ != nullptr) {
+    oat_header->UpdateChecksum(lookup_table_->RawData(), lookup_table_->RawDataLength());
+  }
   oat_header->UpdateChecksum(&methods_offsets_[0],
                             sizeof(methods_offsets_[0]) * methods_offsets_.size());
 }
@@ -1469,6 +1537,11 @@
     return false;
   }
   oat_writer->size_oat_dex_file_offset_ += sizeof(dex_file_offset_);
+  if (!out->WriteFully(&lookup_table_offset_, sizeof(lookup_table_offset_))) {
+    PLOG(ERROR) << "Failed to write lookup table offset to " << out->GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_lookup_table_offset_ += sizeof(lookup_table_offset_);
   if (!out->WriteFully(&methods_offsets_[0],
                       sizeof(methods_offsets_[0]) * methods_offsets_.size())) {
     PLOG(ERROR) << "Failed to write methods offsets to " << out->GetLocation();
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index d6cb65b..f2fe048 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -24,8 +24,8 @@
 #include "linker/relative_patcher.h"  // For linker::RelativePatcherTargetProvider.
 #include "mem_map.h"
 #include "method_reference.h"
-#include "oat.h"
 #include "mirror/class.h"
+#include "oat.h"
 #include "safe_map.h"
 
 namespace art {
@@ -36,6 +36,7 @@
 class ImageWriter;
 class OutputStream;
 class TimingLogger;
+class TypeLookupTable;
 
 // OatHeader         variable length with count of D OatDexFiles
 //
@@ -49,6 +50,11 @@
 // ...
 // Dex[D]
 //
+// TypeLookupTable[0] one descriptor-to-class-def-index hash table for each OatDexFile.
+// TypeLookupTable[1]
+// ...
+// TypeLookupTable[D]
+//
 // OatClass[0]       one variable sized OatClass for each of C DexFile::ClassDefs
 // OatClass[1]       contains OatClass entries with class status, offsets to code, etc.
 // ...
@@ -168,6 +174,7 @@
 
   size_t InitOatHeader();
   size_t InitOatDexFiles(size_t offset);
+  size_t InitLookupTables(size_t offset);
   size_t InitDexFiles(size_t offset);
   size_t InitOatClasses(size_t offset);
   size_t InitOatMaps(size_t offset);
@@ -177,6 +184,7 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool WriteTables(OutputStream* out, const size_t file_offset);
+  bool WriteLookupTables(OutputStream* out, const size_t file_offset);
   size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
   size_t WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset);
   size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
@@ -199,6 +207,8 @@
     const uint8_t* dex_file_location_data_;
     uint32_t dex_file_location_checksum_;
     uint32_t dex_file_offset_;
+    uint32_t lookup_table_offset_;
+    TypeLookupTable* lookup_table_;  // Owned by the dex file.
     std::vector<uint32_t> methods_offsets_;
 
    private:
@@ -333,6 +343,9 @@
   uint32_t size_oat_class_status_;
   uint32_t size_oat_class_method_bitmaps_;
   uint32_t size_oat_class_method_offsets_;
+  uint32_t size_oat_lookup_table_alignment_;
+  uint32_t size_oat_lookup_table_offset_;
+  uint32_t size_oat_lookup_table_;
 
   std::unique_ptr<linker::RelativePatcher> relative_patcher_;
 
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index e6b9273..29d08be 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -19,10 +19,12 @@
 #include "arch/mips/entrypoints_direct_mips.h"
 #include "arch/mips/instruction_set_features_mips.h"
 #include "art_method.h"
+#include "code_generator_utils.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
 #include "intrinsics.h"
+#include "intrinsics_mips.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "offsets.h"
@@ -2933,7 +2935,11 @@
 }
 
 void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  // TODO: intrinsic function.
+  IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
+  if (intrinsic.TryDispatch(invoke)) {
+    return;
+  }
+
   HandleInvoke(invoke);
 }
 
@@ -2942,13 +2948,18 @@
   // invokes must have been pruned by art::PrepareForRegisterAllocation.
   DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
 
-  // TODO: intrinsic function.
+  IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
+  if (intrinsic.TryDispatch(invoke)) {
+    return;
+  }
+
   HandleInvoke(invoke);
 }
 
-static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen ATTRIBUTE_UNUSED) {
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
   if (invoke->GetLocations()->Intrinsified()) {
-    // TODO: intrinsic function.
+    IntrinsicCodeGeneratorMIPS intrinsic(codegen);
+    intrinsic.Dispatch(invoke);
     return true;
   }
   return false;
@@ -3088,7 +3099,10 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  // TODO: Try to generate intrinsics code.
+  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+    return;
+  }
+
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index e0aa4ff..57452cc 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -27,6 +27,11 @@
  private:
   void VisitShift(HBinaryOperation* shift);
 
+  void VisitAbove(HAbove* instruction) OVERRIDE;
+  void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
+  void VisitBelow(HBelow* instruction) OVERRIDE;
+  void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
+
   void VisitAnd(HAnd* instruction) OVERRIDE;
   void VisitCompare(HCompare* instruction) OVERRIDE;
   void VisitMul(HMul* instruction) OVERRIDE;
@@ -105,6 +110,54 @@
   }
 }
 
+void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) {
+  if (instruction->GetLeft()->IsConstant() &&
+      instruction->GetLeft()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    ABOVE dst, 0, src  // unsigned 0 > src is always false
+    // with
+    //    CONSTANT false
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitAboveOrEqual(HAboveOrEqual* instruction) {
+  if (instruction->GetRight()->IsConstant() &&
+      instruction->GetRight()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    ABOVE_OR_EQUAL dst, src, 0  // unsigned src >= 0 is always true
+    // with
+    //    CONSTANT true
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitBelow(HBelow* instruction) {
+  if (instruction->GetRight()->IsConstant() &&
+      instruction->GetRight()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    BELOW dst, src, 0  // unsigned src < 0 is always false
+    // with
+    //    CONSTANT false
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitBelowOrEqual(HBelowOrEqual* instruction) {
+  if (instruction->GetLeft()->IsConstant() &&
+      instruction->GetLeft()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    BELOW_OR_EQUAL dst, 0, src  // unsigned 0 <= src is always true
+    // with
+    //    CONSTANT true
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
 void InstructionWithAbsorbingInputSimplifier::VisitAnd(HAnd* instruction) {
   HConstant* input_cst = instruction->GetConstantRight();
   if ((input_cst != nullptr) && input_cst->IsZero()) {
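
Note: the four folders above rest on identities of unsigned comparison against zero that hold for any value of the other operand, which is what makes that operand absorbing. A tiny standalone check of those identities (plain C++, illustrative only):

    #include <cassert>
    #include <cstdint>

    // Identities used by VisitAbove / VisitAboveOrEqual / VisitBelow / VisitBelowOrEqual.
    void CheckUnsignedZeroIdentities(uint32_t src) {
      assert(!(0u > src));   // ABOVE 0, src            -> always false
      assert(src >= 0u);     // ABOVE_OR_EQUAL src, 0   -> always true
      assert(!(src < 0u));   // BELOW src, 0            -> always false
      assert(0u <= src);     // BELOW_OR_EQUAL 0, src   -> always true
    }
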
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 2feb75c..e469c8d 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -29,50 +29,70 @@
 
 namespace art {
 
-static void TestCode(const uint16_t* data,
-                     const std::string& expected_before,
-                     const std::string& expected_after_cf,
-                     const std::string& expected_after_dce,
-                     std::function<void(HGraph*)> check_after_cf,
-                     Primitive::Type return_type = Primitive::kPrimInt) {
-  ArenaPool pool;
-  ArenaAllocator allocator(&pool);
-  HGraph* graph = CreateCFG(&allocator, data, return_type);
-  ASSERT_NE(graph, nullptr);
+/**
+ * Fixture class for the constant folding and dce tests.
+ */
+class ConstantFoldingTest : public testing::Test {
+ public:
+  ConstantFoldingTest() : pool_(), allocator_(&pool_) {
+    graph_ = CreateGraph(&allocator_);
+  }
 
-  graph->TryBuildingSsa();
+  void TestCode(const uint16_t* data,
+                const std::string& expected_before,
+                const std::string& expected_after_cf,
+                const std::string& expected_after_dce,
+                std::function<void(HGraph*)> check_after_cf,
+                Primitive::Type return_type = Primitive::kPrimInt) {
+    graph_ = CreateCFG(&allocator_, data, return_type);
+    TestCodeOnReadyGraph(expected_before,
+                         expected_after_cf,
+                         expected_after_dce,
+                         check_after_cf);
+  }
 
-  StringPrettyPrinter printer_before(graph);
-  printer_before.VisitInsertionOrder();
-  std::string actual_before = printer_before.str();
-  ASSERT_EQ(expected_before, actual_before);
+  void TestCodeOnReadyGraph(const std::string& expected_before,
+                            const std::string& expected_after_cf,
+                            const std::string& expected_after_dce,
+                            std::function<void(HGraph*)> check_after_cf) {
+    ASSERT_NE(graph_, nullptr);
+    graph_->TryBuildingSsa();
 
-  std::unique_ptr<const X86InstructionSetFeatures> features_x86(
-      X86InstructionSetFeatures::FromCppDefines());
-  x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
-  HConstantFolding(graph).Run();
-  SSAChecker ssa_checker_cf(graph);
-  ssa_checker_cf.Run();
-  ASSERT_TRUE(ssa_checker_cf.IsValid());
+    StringPrettyPrinter printer_before(graph_);
+    printer_before.VisitInsertionOrder();
+    std::string actual_before = printer_before.str();
+    EXPECT_EQ(expected_before, actual_before);
 
-  StringPrettyPrinter printer_after_cf(graph);
-  printer_after_cf.VisitInsertionOrder();
-  std::string actual_after_cf = printer_after_cf.str();
-  ASSERT_EQ(expected_after_cf, actual_after_cf);
+    std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+        X86InstructionSetFeatures::FromCppDefines());
+    x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
+    HConstantFolding(graph_).Run();
+    SSAChecker ssa_checker_cf(graph_);
+    ssa_checker_cf.Run();
+    ASSERT_TRUE(ssa_checker_cf.IsValid());
 
-  check_after_cf(graph);
+    StringPrettyPrinter printer_after_cf(graph_);
+    printer_after_cf.VisitInsertionOrder();
+    std::string actual_after_cf = printer_after_cf.str();
+    EXPECT_EQ(expected_after_cf, actual_after_cf);
 
-  HDeadCodeElimination(graph).Run();
-  SSAChecker ssa_checker_dce(graph);
-  ssa_checker_dce.Run();
-  ASSERT_TRUE(ssa_checker_dce.IsValid());
+    check_after_cf(graph_);
 
-  StringPrettyPrinter printer_after_dce(graph);
-  printer_after_dce.VisitInsertionOrder();
-  std::string actual_after_dce = printer_after_dce.str();
-  ASSERT_EQ(expected_after_dce, actual_after_dce);
-}
+    HDeadCodeElimination(graph_).Run();
+    SSAChecker ssa_checker_dce(graph_);
+    ssa_checker_dce.Run();
+    ASSERT_TRUE(ssa_checker_dce.IsValid());
 
+    StringPrettyPrinter printer_after_dce(graph_);
+    printer_after_dce.VisitInsertionOrder();
+    std::string actual_after_dce = printer_after_dce.str();
+    EXPECT_EQ(expected_after_dce, actual_after_dce);
+  }
+
+  ArenaPool pool_;
+  ArenaAllocator allocator_;
+  HGraph* graph_;
+};
 
 /**
  * Tiny three-register program exercising int constant folding on negation.
@@ -84,7 +104,7 @@
  *     v1 <- -v0                1.      neg-int v1, v0
  *     return v1                2.      return v1
  */
-TEST(ConstantFolding, IntConstantFoldingNegation) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingNegation) {
   const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::NEG_INT | 1 << 8 | 0 << 12,
@@ -141,7 +161,7 @@
  *     (v2, v3) <- -(v0, v1)    1.      neg-long v2, v0
  *     return (v2, v3)          2.      return-wide v2
  */
-TEST(ConstantFolding, LongConstantFoldingNegation) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
   const int64_t input = INT64_C(4294967296);             // 2^32
   const uint16_t word0 = Low16Bits(Low32Bits(input));    // LSW.
   const uint16_t word1 = High16Bits(Low32Bits(input));
@@ -205,7 +225,7 @@
  *     v2 <- v0 + v1            2.      add-int v2, v0, v1
  *     return v2                4.      return v2
  */
-TEST(ConstantFolding, IntConstantFoldingOnAddition1) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -271,7 +291,7 @@
  *     v2 <- v0 + v1            6.      add-int v2, v0, v1
  *     return v2                8.      return v2
  */
-TEST(ConstantFolding, IntConstantFoldingOnAddition2) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -357,7 +377,7 @@
  *     v2 <- v0 - v1            2.      sub-int v2, v0, v1
  *     return v2                4.      return v2
  */
-TEST(ConstantFolding, IntConstantFoldingOnSubtraction) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 3 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -421,7 +441,7 @@
  *       (v0, v1) + (v1, v2)    4.      add-long v4, v0, v2
  *     return (v4, v5)          6.      return-wide v4
  */
-TEST(ConstantFolding, LongConstantFoldingOnAddition) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
   const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
     Instruction::CONST_WIDE_16 | 0 << 8, 1,
     Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -486,7 +506,7 @@
  *       (v0, v1) - (v1, v2)    4.      sub-long v4, v0, v2
  *     return (v4, v5)          6.      return-wide v4
  */
-TEST(ConstantFolding, LongConstantFoldingOnSubtraction) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
   const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
     Instruction::CONST_WIDE_16 | 0 << 8, 3,
     Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -560,7 +580,7 @@
  * L3: v2 <- v1 + 8             11.     add-int/lit16 v2, v1, #+8
  *     return v2                13.     return v2
  */
-TEST(ConstantFolding, IntConstantFoldingAndJumps) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -656,7 +676,6 @@
            check_after_cf);
 }
 
-
 /**
  * Three-register program with a constant (static) condition.
  *
@@ -670,7 +689,7 @@
  * L1: v2 <- v0 + v1            5.      add-int v2, v0, v1
  *     return-void              7.      return
  */
-TEST(ConstantFolding, ConstantCondition) {
+TEST_F(ConstantFoldingTest, ConstantCondition) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 1 << 8 | 1 << 12,
     Instruction::CONST_4 | 0 << 8 | 0 << 12,
@@ -732,4 +751,109 @@
            check_after_cf);
 }
 
+/**
+ * Unsigned comparisons with zero. Since these instructions are not present
+ * in the bytecode, we need to set up the graph explicitly.
+ */
+TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
+  graph_ = CreateGraph(&allocator_);
+  HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+  graph_->AddBlock(entry_block);
+  graph_->SetEntryBlock(entry_block);
+  HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+  graph_->AddBlock(block);
+  HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+  graph_->AddBlock(exit_block);
+  graph_->SetExitBlock(exit_block);
+  entry_block->AddSuccessor(block);
+  block->AddSuccessor(exit_block);
+
+  // Make various unsigned comparisons with zero against a parameter.
+  HInstruction* parameter = new (&allocator_) HParameterValue(
+      graph_->GetDexFile(), 0, 0, Primitive::kPrimInt, true);
+  entry_block->AddInstruction(parameter);
+  HInstruction* zero = graph_->GetIntConstant(0);
+  HInstruction* last;
+  block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+
+  entry_block->AddInstruction(new (&allocator_) HGoto());
+  block->AddInstruction(new (&allocator_) HReturn(zero));
+  exit_block->AddInstruction(new (&allocator_) HExit());
+
+  const std::string expected_before =
+      "BasicBlock 0, succ: 1\n"
+      "  0: ParameterValue [16, 14, 12, 10, 8, 6, 4, 2]\n"
+      "  1: IntConstant [19, 16, 14, 12, 10, 8, 6, 4, 2]\n"
+      "  18: Goto 1\n"
+      "BasicBlock 1, pred: 0, succ: 2\n"
+      "  2: Above(1, 0) [3]\n"
+      "  3: Deoptimize(2)\n"
+      "  4: Above(0, 1) [5]\n"
+      "  5: Deoptimize(4)\n"
+      "  6: AboveOrEqual(1, 0) [7]\n"
+      "  7: Deoptimize(6)\n"
+      "  8: AboveOrEqual(0, 1) [9]\n"
+      "  9: Deoptimize(8)\n"
+      "  10: Below(1, 0) [11]\n"
+      "  11: Deoptimize(10)\n"
+      "  12: Below(0, 1) [13]\n"
+      "  13: Deoptimize(12)\n"
+      "  14: BelowOrEqual(1, 0) [15]\n"
+      "  15: Deoptimize(14)\n"
+      "  16: BelowOrEqual(0, 1) [17]\n"
+      "  17: Deoptimize(16)\n"
+      "  19: Return(1)\n"
+      "BasicBlock 2, pred: 1\n"
+      "  20: Exit\n";
+
+  const std::string expected_after_cf =
+      "BasicBlock 0, succ: 1\n"
+      "  0: ParameterValue [16, 10, 6, 4]\n"
+      "  1: IntConstant [13, 3, 19, 16, 10, 6, 4]\n"
+      "  21: IntConstant [15, 9]\n"
+      "  18: Goto 1\n"
+      "BasicBlock 1, pred: 0, succ: 2\n"
+      "  3: Deoptimize(1)\n"
+      "  4: Above(0, 1) [5]\n"
+      "  5: Deoptimize(4)\n"
+      "  6: AboveOrEqual(1, 0) [7]\n"
+      "  7: Deoptimize(6)\n"
+      "  9: Deoptimize(21)\n"
+      "  10: Below(1, 0) [11]\n"
+      "  11: Deoptimize(10)\n"
+      "  13: Deoptimize(1)\n"
+      "  15: Deoptimize(21)\n"
+      "  16: BelowOrEqual(0, 1) [17]\n"
+      "  17: Deoptimize(16)\n"
+      "  19: Return(1)\n"
+      "BasicBlock 2, pred: 1\n"
+      "  20: Exit\n";
+
+  const std::string expected_after_dce = expected_after_cf;
+
+  auto check_after_cf = [](HGraph* graph) {
+    CHECK(graph != nullptr);
+  };
+
+  TestCodeOnReadyGraph(expected_before,
+                       expected_after_cf,
+                       expected_after_dce,
+                       check_after_cf);
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 8968a44..fdf8cc9 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -20,19 +20,6 @@
 namespace art {
 
 /**
- * Returns true if instruction is invariant within the given loop.
- */
-static bool IsLoopInvariant(HLoopInformation* loop, HInstruction* instruction) {
-  HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
-  if (other_loop != loop) {
-    // If instruction does not occur in same loop, it is invariant
-    // if it appears in an outer loop (including no loop at all).
-    return other_loop == nullptr || loop->IsIn(*other_loop);
-  }
-  return false;
-}
-
-/**
  * Since graph traversal may enter a SCC at any position, an initial representation may be rotated,
  * along dependences, viz. any of (a, b, c, d), (d, a, b, c)  (c, d, a, b), (b, c, d, a) assuming
  * a chain of dependences (mutual independent items may occur in arbitrary order). For proper
@@ -601,15 +588,16 @@
   //     an unsigned entity, for example, as in the following loop that uses the full range:
   //     for (int i = INT_MIN; i < INT_MAX; i++) // TC = UINT_MAX
   // (2) The TC is only valid if the loop is taken, otherwise TC = 0, as in:
-  //     for (int i = 12; i < U; i++) // TC = 0 when U >= 12
+  //     for (int i = 12; i < U; i++) // TC = 0 when U < 12
   //     If this cannot be determined at compile-time, the TC is only valid within the
-  //     loop-body proper, not the loop-header unless enforced with an explicit condition.
+  //     loop-body proper, not the loop-header unless enforced with an explicit taken-test.
   // (3) The TC is only valid if the loop is finite, otherwise TC has no value, as in:
   //     for (int i = 0; i <= U; i++) // TC = Inf when U = INT_MAX
   //     If this cannot be determined at compile-time, the TC is only valid when enforced
-  //     with an explicit condition.
+  //     with an explicit finite-test.
   // (4) For loops which exit early, the TC forms an upper bound, as in:
   //     for (int i = 0; i < 10 && ....; i++) // TC <= 10
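+  // For example: for (int i = 12; i < U; i++) with U unknown at compile-time, this code
+  // computes TC = (U - 12) of kind kTripCountInBody, guarded by the taken-test (12 < U).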
+  InductionInfo* trip_count = upper_expr;
   const bool is_taken = IsTaken(lower_expr, upper_expr, cmp);
   const bool is_finite = IsFinite(upper_expr, stride_value, type, cmp);
   const bool cancels = (cmp == kCondLT || cmp == kCondGT) && std::abs(stride_value) == 1;
@@ -617,26 +605,36 @@
     // Convert exclusive integral inequality into inclusive integral inequality,
     // viz. condition i < U is i <= U - 1 and condition i > U is i >= U + 1.
     if (cmp == kCondLT) {
-      upper_expr = CreateInvariantOp(kSub, upper_expr, CreateConstant(1, type));
+      trip_count = CreateInvariantOp(kSub, trip_count, CreateConstant(1, type));
     } else if (cmp == kCondGT) {
-      upper_expr = CreateInvariantOp(kAdd, upper_expr, CreateConstant(1, type));
+      trip_count = CreateInvariantOp(kAdd, trip_count, CreateConstant(1, type));
     }
     // Compensate for stride.
-    upper_expr = CreateInvariantOp(kAdd, upper_expr, stride);
+    trip_count = CreateInvariantOp(kAdd, trip_count, stride);
   }
-  InductionInfo* trip_count
-      = CreateInvariantOp(kDiv, CreateInvariantOp(kSub, upper_expr, lower_expr), stride);
+  trip_count = CreateInvariantOp(kDiv, CreateInvariantOp(kSub, trip_count, lower_expr), stride);
   // Assign the trip-count expression to the loop control. Clients that use the information
   // should be aware that the expression is only valid under the conditions listed above.
-  InductionOp tcKind = kTripCountInBodyUnsafe;
+  InductionOp tcKind = kTripCountInBodyUnsafe;  // needs both tests
   if (is_taken && is_finite) {
-    tcKind = kTripCountInLoop;
+    tcKind = kTripCountInLoop;  // needs neither test
   } else if (is_finite) {
-    tcKind = kTripCountInBody;
+    tcKind = kTripCountInBody;  // needs taken-test
   } else if (is_taken) {
-    tcKind = kTripCountInLoopUnsafe;
+    tcKind = kTripCountInLoopUnsafe;  // needs finite-test
   }
-  AssignInfo(loop, loop->GetHeader()->GetLastInstruction(), CreateTripCount(tcKind, trip_count));
+  InductionOp op = kNop;
+  switch (cmp) {
+    case kCondLT: op = kLT; break;
+    case kCondLE: op = kLE; break;
+    case kCondGT: op = kGT; break;
+    case kCondGE: op = kGE; break;
+    default:      LOG(FATAL) << "CONDITION UNREACHABLE";
+  }
+  InductionInfo* taken_test = CreateInvariantOp(op, lower_expr, upper_expr);
+  AssignInfo(loop,
+             loop->GetHeader()->GetLastInstruction(),
+             CreateTripCount(tcKind, trip_count, taken_test));
 }
 
 bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
@@ -707,7 +705,7 @@
       return loop_it->second;
     }
   }
-  if (IsLoopInvariant(loop, instruction)) {
+  if (loop->IsLoopInvariant(instruction, true)) {
     InductionInfo* info = CreateInvariantFetch(instruction);
     AssignInfo(loop, instruction, info);
     return info;
@@ -829,12 +827,16 @@
       std::string inv = "(";
       inv += InductionToString(info->op_a);
       switch (info->operation) {
-        case kNop:   inv += " @ "; break;
-        case kAdd:   inv += " + "; break;
+        case kNop:   inv += " @ ";  break;
+        case kAdd:   inv += " + ";  break;
         case kSub:
-        case kNeg:   inv += " - "; break;
-        case kMul:   inv += " * "; break;
-        case kDiv:   inv += " / "; break;
+        case kNeg:   inv += " - ";  break;
+        case kMul:   inv += " * ";  break;
+        case kDiv:   inv += " / ";  break;
+        case kLT:    inv += " < ";  break;
+        case kLE:    inv += " <= "; break;
+        case kGT:    inv += " > ";  break;
+        case kGE:    inv += " >= "; break;
         case kFetch:
           DCHECK(info->fetch);
           if (IsIntAndGet(info, &value)) {
@@ -843,10 +845,10 @@
             inv += std::to_string(info->fetch->GetId()) + ":" + info->fetch->DebugName();
           }
           break;
-        case kTripCountInLoop:       inv += "TC-loop:"; break;
-        case kTripCountInBody:       inv += "TC-body:"; break;
-        case kTripCountInLoopUnsafe: inv += "TC-loop-unsafe:"; break;
-        case kTripCountInBodyUnsafe: inv += "TC-body-unsafe:"; break;
+        case kTripCountInLoop:       inv += " (TC-loop) ";        break;
+        case kTripCountInBody:       inv += " (TC-body) ";        break;
+        case kTripCountInLoopUnsafe: inv += " (TC-loop-unsafe) "; break;
+        case kTripCountInBodyUnsafe: inv += " (TC-body-unsafe) "; break;
       }
       inv += InductionToString(info->op_b);
       return inv + ")";
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 7ab80cd..cf35409 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -65,11 +65,16 @@
     kMul,
     kDiv,
     kFetch,
-    // Trip counts (valid in full loop or only body proper; unsafe implies loop may be infinite).
-    kTripCountInLoop,
-    kTripCountInBody,
-    kTripCountInLoopUnsafe,
-    kTripCountInBodyUnsafe
+    // Trip-counts.
+    kTripCountInLoop,        // valid in full loop; loop is finite
+    kTripCountInBody,        // valid in body only; loop is finite
+    kTripCountInLoopUnsafe,  // valid in full loop; loop may be infinite
+    kTripCountInBodyUnsafe,  // valid in body only; loop may be infinite
+    // Comparisons for trip-count tests.
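+    // (A taken-test such as (0) < (100) is stored as kLT with op_a = (0) and op_b = (100).)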
+    kLT,
+    kLE,
+    kGT,
+    kGE
   };
 
   /**
@@ -85,7 +90,7 @@
    *   (4) periodic
    *         nop: a, then defined by b (repeated when exhausted)
    *   (5) trip-count:
-   *         tc: defined by b
+   *         tc: defined by a, taken-test in b
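+   *             e.g., for (int i = 0; i < 100; i++) this is printed as
+   *             ((100) (TC-loop) ((0) < (100)))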
    */
   struct InductionInfo : public ArenaObject<kArenaAllocInductionVarAnalysis> {
     InductionInfo(InductionClass ic,
@@ -119,8 +124,9 @@
     return new (graph_->GetArena()) InductionInfo(kInvariant, kFetch, nullptr, nullptr, f);
   }
 
-  InductionInfo* CreateTripCount(InductionOp op, InductionInfo* b) {
-    return new (graph_->GetArena()) InductionInfo(kInvariant, op, nullptr, b, nullptr);
+  InductionInfo* CreateTripCount(InductionOp op, InductionInfo* a, InductionInfo* b) {
+    DCHECK(a != nullptr);
+    return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr);
   }
 
   InductionInfo* CreateInduction(InductionClass ic, InductionInfo* a, InductionInfo* b) {
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index f16da2a..b7262f6 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -234,7 +234,7 @@
   EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[0], 0).c_str());
 
   // Trip-count.
-  EXPECT_STREQ("(TC-loop:(100))",
+  EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
                GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
 }
 
@@ -552,7 +552,7 @@
     }
     EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[d], d).c_str());
     // Trip-count.
-    EXPECT_STREQ("(TC-loop:(100))",
+    EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
                  GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
   }
 }
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index f4842f9..5530d26 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -152,7 +152,7 @@
     }
   } else if (is_min) {
     // Special case for finding minimum: minimum of trip-count in loop-body is 1.
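+    // (The trip-count expression is stored in op_a; op_b holds the taken-test.)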
-    if (trip != nullptr && in_body && instruction == trip->op_b->fetch) {
+    if (trip != nullptr && in_body && instruction == trip->op_a->fetch) {
       return Value(1);
     }
   }
@@ -185,14 +185,14 @@
             return GetFetch(info->fetch, trip, in_body, is_min);
           case HInductionVarAnalysis::kTripCountInLoop:
             if (!in_body && !is_min) {  // one extra!
-              return GetVal(info->op_b, trip, in_body, is_min);
+              return GetVal(info->op_a, trip, in_body, is_min);
             }
             FALLTHROUGH_INTENDED;
           case HInductionVarAnalysis::kTripCountInBody:
             if (is_min) {
               return Value(0);
             } else if (in_body) {
-              return SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
+              return SubValue(GetVal(info->op_a, trip, in_body, is_min), Value(1));
             }
             break;
           default:
@@ -428,7 +428,7 @@
             return true;
           case HInductionVarAnalysis::kTripCountInLoop:
             if (!in_body && !is_min) {  // one extra!
-              return GenerateCode(info->op_b, trip, graph, block, result, in_body, is_min);
+              return GenerateCode(info->op_a, trip, graph, block, result, in_body, is_min);
             }
             FALLTHROUGH_INTENDED;
           case HInductionVarAnalysis::kTripCountInBody:
@@ -438,7 +438,7 @@
               }
               return true;
             } else if (in_body) {
-              if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+              if (GenerateCode(info->op_a, trip, graph, block, &opb, in_body, is_min)) {
                 if (graph != nullptr) {
                   *result = Insert(block,
                                    new (graph->GetArena())
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 8fbc59f..ce8926a 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -125,7 +125,7 @@
 
   /** Constructs a trip-count. */
   HInductionVarAnalysis::InductionInfo* CreateTripCount(int32_t tc) {
-    return iva_->CreateTripCount(HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc));
+    return iva_->CreateTripCount(HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc), nullptr);
   }
 
   /** Constructs a linear a * i + b induction. */
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
new file mode 100644
index 0000000..5efcf4e
--- /dev/null
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsics_mips.h"
+
+#include "arch/mips/instruction_set_features_mips.h"
+#include "art_method.h"
+#include "code_generator_mips.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "intrinsics.h"
+#include "mirror/array-inl.h"
+#include "mirror/string.h"
+#include "thread.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/mips/constants_mips.h"
+
+namespace art {
+
+namespace mips {
+
+IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
+  : arena_(codegen->GetGraph()->GetArena()) {
+}
+
+MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
+  return reinterpret_cast<MipsAssembler*>(codegen_->GetAssembler());
+}
+
+ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
+  return codegen_->GetGraph()->GetArena();
+}
+
+#define __ codegen->GetAssembler()->
+
+static void MoveFromReturnRegister(Location trg,
+                                   Primitive::Type type,
+                                   CodeGeneratorMIPS* codegen) {
+  if (!trg.IsValid()) {
+    DCHECK_EQ(type, Primitive::kPrimVoid);
+    return;
+  }
+
+  DCHECK_NE(type, Primitive::kPrimVoid);
+
+  if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
+    Register trg_reg = trg.AsRegister<Register>();
+    if (trg_reg != V0) {
+      __ Move(V0, trg_reg);
+    }
+  } else {
+    FRegister trg_reg = trg.AsFpuRegister<FRegister>();
+    if (trg_reg != F0) {
+      if (type == Primitive::kPrimFloat) {
+        __ MovS(F0, trg_reg);
+      } else {
+        __ MovD(F0, trg_reg);
+      }
+    }
+  }
+}
+
+static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
+  InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
+  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
+}
+
+// Slow-path for fallback (calling the managed code to handle the
+// intrinsic) in an intrinsified call. This will copy the arguments
+// into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations
+//       given by the invoke's location summary. If an intrinsic
+//       modifies those locations before a slowpath call, they must be
+//       restored!
+class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : invoke_(invoke) { }
+
+  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+    CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
+
+    __ Bind(GetEntryLabel());
+
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+    MoveArguments(invoke_, codegen);
+
+    if (invoke_->IsInvokeStaticOrDirect()) {
+      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+                                          Location::RegisterLocation(A0));
+      codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
+    } else {
+      UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
+      UNREACHABLE();
+    }
+
+    // Copy the result back to the expected output.
+    Location out = invoke_->GetLocations()->Out();
+    if (out.IsValid()) {
+      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
+      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
+    }
+
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; }
+
+ private:
+  // The instruction where this slow path is happening.
+  HInvoke* const invoke_;
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS);
+};
+
+#undef __
+
+bool IntrinsicLocationsBuilderMIPS::TryDispatch(HInvoke* invoke) {
+  Dispatch(invoke);
+  LocationSummary* res = invoke->GetLocations();
+  return res != nullptr && res->Intrinsified();
+}
+
+#define __ assembler->
+
+// Unimplemented intrinsics.
+
+#define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
+void IntrinsicLocationsBuilderMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
+}                                                                                      \
+void IntrinsicCodeGeneratorMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
+}
+
+UNIMPLEMENTED_INTRINSIC(IntegerReverse)
+UNIMPLEMENTED_INTRINSIC(LongReverse)
+UNIMPLEMENTED_INTRINSIC(ShortReverseBytes)
+UNIMPLEMENTED_INTRINSIC(IntegerReverseBytes)
+UNIMPLEMENTED_INTRINSIC(LongReverseBytes)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
+UNIMPLEMENTED_INTRINSIC(FloatIntBitsToFloat)
+UNIMPLEMENTED_INTRINSIC(DoubleLongBitsToDouble)
+UNIMPLEMENTED_INTRINSIC(FloatFloatToRawIntBits)
+UNIMPLEMENTED_INTRINSIC(DoubleDoubleToRawLongBits)
+UNIMPLEMENTED_INTRINSIC(MathAbsDouble)
+UNIMPLEMENTED_INTRINSIC(MathAbsFloat)
+UNIMPLEMENTED_INTRINSIC(MathAbsInt)
+UNIMPLEMENTED_INTRINSIC(MathAbsLong)
+UNIMPLEMENTED_INTRINSIC(MathMinDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(MathMinFloatFloat)
+UNIMPLEMENTED_INTRINSIC(MathMaxDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(MathMaxFloatFloat)
+UNIMPLEMENTED_INTRINSIC(MathMinIntInt)
+UNIMPLEMENTED_INTRINSIC(MathMinLongLong)
+UNIMPLEMENTED_INTRINSIC(MathMaxIntInt)
+UNIMPLEMENTED_INTRINSIC(MathMaxLongLong)
+UNIMPLEMENTED_INTRINSIC(MathSqrt)
+UNIMPLEMENTED_INTRINSIC(MathCeil)
+UNIMPLEMENTED_INTRINSIC(MathFloor)
+UNIMPLEMENTED_INTRINSIC(MathRint)
+UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
+UNIMPLEMENTED_INTRINSIC(MemoryPeekByte)
+UNIMPLEMENTED_INTRINSIC(MemoryPeekIntNative)
+UNIMPLEMENTED_INTRINSIC(MemoryPeekLongNative)
+UNIMPLEMENTED_INTRINSIC(MemoryPeekShortNative)
+UNIMPLEMENTED_INTRINSIC(MemoryPokeByte)
+UNIMPLEMENTED_INTRINSIC(MemoryPokeIntNative)
+UNIMPLEMENTED_INTRINSIC(MemoryPokeLongNative)
+UNIMPLEMENTED_INTRINSIC(MemoryPokeShortNative)
+UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread)
+UNIMPLEMENTED_INTRINSIC(UnsafeGet)
+UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
+UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
+UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile)
+UNIMPLEMENTED_INTRINSIC(UnsafeGetObject)
+UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile)
+UNIMPLEMENTED_INTRINSIC(UnsafePut)
+UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered)
+UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile)
+UNIMPLEMENTED_INTRINSIC(UnsafePutObject)
+UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered)
+UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile)
+UNIMPLEMENTED_INTRINSIC(UnsafePutLong)
+UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered)
+UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
+UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
+UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
+UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
+UNIMPLEMENTED_INTRINSIC(StringCharAt)
+UNIMPLEMENTED_INTRINSIC(StringCompareTo)
+UNIMPLEMENTED_INTRINSIC(StringEquals)
+UNIMPLEMENTED_INTRINSIC(StringIndexOf)
+UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
+UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
+UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
+UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+
+UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
+
+#undef UNIMPLEMENTED_INTRINSIC
+
+#undef __
+
+}  // namespace mips
+}  // namespace art
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
new file mode 100644
index 0000000..c71b3c6
--- /dev/null
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS_H_
+
+#include "intrinsics.h"
+
+namespace art {
+
+class ArenaAllocator;
+class HInvokeStaticOrDirect;
+class HInvokeVirtual;
+
+namespace mips {
+
+class CodeGeneratorMIPS;
+class MipsAssembler;
+
+class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
+ public:
+  explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen);
+
+  // Define visitor methods.
+
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache)   \
+  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+
+  // Check whether an invoke is an intrinsic, and if so, create a location summary. Returns whether
+  // a corresponding LocationSummary with the intrinsified_ flag set was generated and attached to
+  // the invoke.
+  bool TryDispatch(HInvoke* invoke);
+
+ private:
+  ArenaAllocator* arena_;
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
+};
+
+class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor {
+ public:
+  explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {}
+
+  // Define visitor methods.
+
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache)   \
+  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+
+ private:
+  MipsAssembler* GetAssembler();
+
+  ArenaAllocator* GetAllocator();
+
+  CodeGeneratorMIPS* codegen_;
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS);
+};
+
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS_H_
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 0ab0b80..05c7eb0 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1227,6 +1227,91 @@
   GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, false, codegen_);
 }
 
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  locations->SetOut(Location::RequiresRegister());
+}
+
+static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorMIPS64* codegen) {
+  Mips64Assembler* assembler = codegen->GetAssembler();
+  GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+  GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+  GpuRegister expected = locations->InAt(3).AsRegister<GpuRegister>();
+  GpuRegister value = locations->InAt(4).AsRegister<GpuRegister>();
+  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+  DCHECK_NE(base, out);
+  DCHECK_NE(offset, out);
+  DCHECK_NE(expected, out);
+
+  // do {
+  //   tmp_value = [tmp_ptr] - expected;
+  // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
+  // result = tmp_value != 0;
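+  //
+  // In this implementation 'out' is left holding 1 if the expected value was observed and
+  // the store-conditional succeeded, and 0 otherwise.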
+
+  Label loop_head, exit_loop;
+  __ Daddu(TMP, base, offset);
+  __ Sync(0);
+  __ Bind(&loop_head);
+  if (type == Primitive::kPrimLong) {
+    __ Lld(out, TMP);
+  } else {
+    __ Ll(out, TMP);
+  }
+  __ Dsubu(out, out, expected);         // If we didn't get the 'expected'
+  __ Sltiu(out, out, 1);                // value, set 'out' to false, and
+  __ Beqzc(out, &exit_loop);            // return.
+  __ Move(out, value);  // Use 'out' for the 'store conditional' instruction.
+                        // If we use 'value' directly, we would lose 'value'
+                        // in the case that the store fails.  Whether the
+                        // store succeeds, or fails, it will load the
+                        // correct boolean value into the 'out' register.
+  if (type == Primitive::kPrimLong) {
+    __ Scd(out, TMP);
+  } else {
+    __ Sc(out, TMP);
+  }
+  __ Beqzc(out, &loop_head);    // If we couldn't do the read-modify-write
+                                // cycle atomically then retry.
+  __ Bind(&exit_loop);
+  __ Sync(0);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
+  CreateIntIntIntIntIntToInt(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
+  GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
+  CreateIntIntIntIntIntToInt(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
+  GenCas(invoke->GetLocations(), Primitive::kPrimLong, codegen_);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
+  CreateIntIntIntIntIntToInt(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
+  GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+}
+
 // char java.lang.String.charAt(int index)
 void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -1502,9 +1587,6 @@
 UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
 UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
 
-UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
 UNIMPLEMENTED_INTRINSIC(StringEquals)
 
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 3480265..8b28ff9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -574,6 +574,17 @@
   return other.blocks_.IsBitSet(header_->GetBlockId());
 }
 
+bool HLoopInformation::IsLoopInvariant(HInstruction* instruction, bool must_dominate) const {
+  HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
+  if (other_loop != this && (other_loop == nullptr || !other_loop->IsIn(*this))) {
+    if (must_dominate) {
+      return instruction->GetBlock()->Dominates(GetHeader());
+    }
+    return true;
+  }
+  return false;
+}
+
 size_t HLoopInformation::GetLifetimeEnd() const {
   size_t last_position = 0;
   for (HBasicBlock* back_edge : GetBackEdges()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6028d4b..7df5866 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -551,6 +551,12 @@
   // Note that `other` *must* be populated before entering this function.
   bool IsIn(const HLoopInformation& other) const;
 
+  // Returns true if instruction is not defined within this loop or any loop nested inside
+  // this loop. If must_dominate is set, only definitions that actually dominate the loop
+  // header can be invariant. Otherwise, any definition outside the loop, including
+  // definitions that appear after the loop, is invariant.
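+  // (For example, induction variable analysis passes must_dominate = true so that only
+  // instructions available at the loop header are treated as invariant.)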
+  bool IsLoopInvariant(HInstruction* instruction, bool must_dominate) const;
+
   const ArenaBitVector& GetBlocks() const { return blocks_; }
 
   void Add(HBasicBlock* block);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 6494964..a128079 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -20,6 +20,7 @@
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "driver/compiler_driver.h"
 #include "nodes.h"
+#include "runtime.h"
 
 namespace art {
 
@@ -78,7 +79,13 @@
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
   } else {
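+    // Note: PC-relative loads and calls are only possible when the target method's dex
+    // file is among the dex files being compiled into this oat file.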
+    bool use_pc_relative_instructions =
+        ((direct_method == 0u || direct_code == static_cast<uintptr_t>(-1))) &&
+        ContainsElement(compiler_driver_->GetDexFilesForOatFile(), target_method.dex_file);
     if (direct_method != 0u) {  // Should we use a direct pointer to the method?
+      // Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while
+      // kDirectAddress would be fine for image methods, we don't support it at the moment.
+      DCHECK(!Runtime::Current()->UseJit());
       if (direct_method != static_cast<uintptr_t>(-1)) {  // Is the method pointer known now?
         method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
         method_load_data = direct_method;
@@ -87,24 +94,25 @@
       }
     } else {  // Use dex cache.
       DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
-      DexCacheArraysLayout layout =
-          compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
-      if (layout.Valid()) {  // Can we use PC-relative access to the dex cache arrays?
+      if (use_pc_relative_instructions) {  // Can we use PC-relative access to the dex cache arrays?
+        DCHECK(!Runtime::Current()->UseJit());
         method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
+        DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
+                                    &graph_->GetDexFile());
         method_load_data = layout.MethodOffset(target_method.dex_method_index);
       } else {  // We must go through the ArtMethod's pointer to resolved methods.
         method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
       }
     }
     if (direct_code != 0u) {  // Should we use a direct pointer to the code?
+      // Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and
+      // while kCallDirect would be fine for image methods, we don't support it at the moment.
+      DCHECK(!Runtime::Current()->UseJit());
       if (direct_code != static_cast<uintptr_t>(-1)) {  // Is the code pointer known now?
         code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
         direct_code_ptr = direct_code;
-      } else if (compiler_driver_->IsImage() ||
-          target_method.dex_file == &graph_->GetDexFile()) {
+      } else if (use_pc_relative_instructions) {
         // Use PC-relative calls for invokes within a multi-dex oat file.
-        // TODO: Recognize when the target dex file is within the current oat file for
-        // app compilation. At the moment we recognize only the boot image as multi-dex.
         code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
       } else {  // The direct pointer will be known at link time.
         // NOTE: This is used for app->boot calls when compiling an app against
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 384b879..8773169 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -445,38 +445,6 @@
   pthread_t pthread_;
 };
 
-static void ParseStringAfterChar(const std::string& s, char c, std::string* parsed_value) {
-  std::string::size_type colon = s.find(c);
-  if (colon == std::string::npos) {
-    Usage("Missing char %c in option %s\n", c, s.c_str());
-  }
-  // Add one to remove the char we were trimming until.
-  *parsed_value = s.substr(colon + 1);
-}
-
-static void ParseDouble(const std::string& option, char after_char, double min, double max,
-                        double* parsed_value) {
-  std::string substring;
-  ParseStringAfterChar(option, after_char, &substring);
-  bool sane_val = true;
-  double value;
-  if (false) {
-    // TODO: this doesn't seem to work on the emulator.  b/15114595
-    std::stringstream iss(substring);
-    iss >> value;
-    // Ensure that we have a value, there was no cruft after it and it satisfies a sensible range.
-    sane_val = iss.eof() && (value >= min) && (value <= max);
-  } else {
-    char* end = nullptr;
-    value = strtod(substring.c_str(), &end);
-    sane_val = *end == '\0' && value >= min && value <= max;
-  }
-  if (!sane_val) {
-    Usage("Invalid double value %s for option %s\n", substring.c_str(), option.c_str());
-  }
-  *parsed_value = value;
-}
-
 static constexpr size_t kMinDexFilesForSwap = 2;
 static constexpr size_t kMinDexFileCumulativeSizeForSwap = 20 * MB;
 
@@ -555,66 +523,21 @@
   struct ParserOptions {
     std::string oat_symbols;
     std::string boot_image_filename;
-    const char* compiler_filter_string = nullptr;
-    CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
-    bool compile_pic = false;
-    int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
-    int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
-    int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
-    int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
-    int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
-    static constexpr int kUnsetInlineDepthLimit = -1;
-    int inline_depth_limit = kUnsetInlineDepthLimit;
-    static constexpr int kUnsetInlineMaxCodeUnits = -1;
-    int inline_max_code_units = kUnsetInlineMaxCodeUnits;
-
-    // Profile file to use
-    double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
-
-    bool debuggable = false;
-    bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
-    bool generate_debug_info = kIsDebugBuild;
     bool watch_dog_enabled = true;
-    bool abort_on_hard_verifier_error = false;
     bool requested_specific_compiler = false;
-
-    bool implicit_null_checks = false;
-    bool implicit_so_checks = false;
-    bool implicit_suspend_checks = false;
-
-    PassManagerOptions pass_manager_options;
-
     std::string error_msg;
   };
 
-  template <typename T>
-  static void ParseUintOption(const StringPiece& option,
-                              const std::string& option_name,
-                              T* out,
-                              bool is_long_option = true) {
-    std::string option_prefix = option_name + (is_long_option ? "=" : "");
-    DCHECK(option.starts_with(option_prefix));
-    const char* value_string = option.substr(option_prefix.size()).data();
-    int64_t parsed_integer_value;
-    if (!ParseInt(value_string, &parsed_integer_value)) {
-      Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
-    }
-    if (parsed_integer_value < 0) {
-      Usage("%s passed a negative value %d", option_name.c_str(), parsed_integer_value);
-    }
-    *out = dchecked_integral_cast<T>(parsed_integer_value);
-  }
-
   void ParseZipFd(const StringPiece& option) {
-    ParseUintOption(option, "--zip-fd", &zip_fd_);
+    ParseUintOption(option, "--zip-fd", &zip_fd_, Usage);
   }
 
   void ParseOatFd(const StringPiece& option) {
-    ParseUintOption(option, "--oat-fd", &oat_fd_);
+    ParseUintOption(option, "--oat-fd", &oat_fd_, Usage);
   }
 
   void ParseJ(const StringPiece& option) {
-    ParseUintOption(option, "-j", &thread_count_, /* is_long_option */ false);
+    ParseUintOption(option, "-j", &thread_count_, Usage, /* is_long_option */ false);
   }
 
   void ParseBase(const StringPiece& option) {
@@ -685,80 +608,15 @@
     }
   }
 
-  void ParseHugeMethodMax(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--huge-method-max", &parser_options->huge_method_threshold);
-  }
-
-  void ParseLargeMethodMax(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--large-method-max", &parser_options->large_method_threshold);
-  }
-
-  void ParseSmallMethodMax(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--small-method-max", &parser_options->small_method_threshold);
-  }
-
-  void ParseTinyMethodMax(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--tiny-method-max", &parser_options->tiny_method_threshold);
-  }
-
-  void ParseNumDexMethods(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--num-dex-methods", &parser_options->num_dex_methods_threshold);
-  }
-
-  void ParseInlineDepthLimit(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--inline-depth-limit", &parser_options->inline_depth_limit);
-  }
-
-  void ParseInlineMaxCodeUnits(const StringPiece& option, ParserOptions* parser_options) {
-    ParseUintOption(option, "--inline-max-code-units=", &parser_options->inline_max_code_units);
-  }
-
-  void ParseDisablePasses(const StringPiece& option, ParserOptions* parser_options) {
-    DCHECK(option.starts_with("--disable-passes="));
-    const std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
-    parser_options->pass_manager_options.SetDisablePassList(disable_passes);
-  }
-
-  void ParsePrintPasses(const StringPiece& option, ParserOptions* parser_options) {
-    DCHECK(option.starts_with("--print-passes="));
-    const std::string print_passes = option.substr(strlen("--print-passes=")).data();
-    parser_options->pass_manager_options.SetPrintPassList(print_passes);
-  }
-
-  void ParseDumpCfgPasses(const StringPiece& option, ParserOptions* parser_options) {
-    DCHECK(option.starts_with("--dump-cfg-passes="));
-    const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
-    parser_options->pass_manager_options.SetDumpPassList(dump_passes_string);
-  }
-
-  void ParsePassOptions(const StringPiece& option, ParserOptions* parser_options) {
-    DCHECK(option.starts_with("--pass-options="));
-    const std::string pass_options = option.substr(strlen("--pass-options=")).data();
-    parser_options->pass_manager_options.SetOverriddenPassOptions(pass_options);
-  }
-
-  void ParseDumpInitFailures(const StringPiece& option) {
-    DCHECK(option.starts_with("--dump-init-failures="));
-    std::string file_name = option.substr(strlen("--dump-init-failures=")).data();
-    init_failure_output_.reset(new std::ofstream(file_name));
-    if (init_failure_output_.get() == nullptr) {
-      LOG(ERROR) << "Failed to allocate ofstream";
-    } else if (init_failure_output_->fail()) {
-      LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization "
-                 << "failures.";
-      init_failure_output_.reset();
-    }
-  }
-
   void ParseSwapFd(const StringPiece& option) {
-    ParseUintOption(option, "--swap-fd", &swap_fd_);
+    ParseUintOption(option, "--swap-fd", &swap_fd_, Usage);
   }
 
   void ProcessOptions(ParserOptions* parser_options) {
     image_ = (!image_filename_.empty());
     if (image_) {
       // We need the boot image to always be debuggable.
-      parser_options->debuggable = true;
+      compiler_options_->debuggable_ = true;
     }
 
     if (oat_filename_.empty() && oat_fd_ == -1) {
@@ -882,44 +740,19 @@
       }
     }
 
-    if (parser_options->compiler_filter_string == nullptr) {
-      parser_options->compiler_filter_string = "speed";
-    }
-
-    CHECK(parser_options->compiler_filter_string != nullptr);
-    if (strcmp(parser_options->compiler_filter_string, "verify-none") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kVerifyNone;
-    } else if (strcmp(parser_options->compiler_filter_string, "interpret-only") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kInterpretOnly;
-    } else if (strcmp(parser_options->compiler_filter_string, "verify-at-runtime") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kVerifyAtRuntime;
-    } else if (strcmp(parser_options->compiler_filter_string, "space") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kSpace;
-    } else if (strcmp(parser_options->compiler_filter_string, "balanced") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kBalanced;
-    } else if (strcmp(parser_options->compiler_filter_string, "speed") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kSpeed;
-    } else if (strcmp(parser_options->compiler_filter_string, "everything") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kEverything;
-    } else if (strcmp(parser_options->compiler_filter_string, "time") == 0) {
-      parser_options->compiler_filter = CompilerOptions::kTime;
-    } else {
-      Usage("Unknown --compiler-filter value %s", parser_options->compiler_filter_string);
-    }
-
     // If they are not set, use default values for inlining settings.
     // TODO: We should rethink the compiler filter. We mostly save
     // time here, which is orthogonal to space.
-    if (parser_options->inline_depth_limit == ParserOptions::kUnsetInlineDepthLimit) {
-      parser_options->inline_depth_limit =
-          (parser_options->compiler_filter == CompilerOptions::kSpace)
+    if (compiler_options_->inline_depth_limit_ == CompilerOptions::kUnsetInlineDepthLimit) {
+      compiler_options_->inline_depth_limit_ =
+          (compiler_options_->compiler_filter_ == CompilerOptions::kSpace)
           // Implementation of the space filter: limit inlining depth.
           ? CompilerOptions::kSpaceFilterInlineDepthLimit
           : CompilerOptions::kDefaultInlineDepthLimit;
     }
-    if (parser_options->inline_max_code_units == ParserOptions::kUnsetInlineMaxCodeUnits) {
-      parser_options->inline_max_code_units =
-          (parser_options->compiler_filter == CompilerOptions::kSpace)
+    if (compiler_options_->inline_max_code_units_ == CompilerOptions::kUnsetInlineMaxCodeUnits) {
+      compiler_options_->inline_max_code_units_ =
+          (compiler_options_->compiler_filter_ == CompilerOptions::kSpace)
           // Implementation of the space filter: limit inlining max code units.
           ? CompilerOptions::kSpaceFilterInlineMaxCodeUnits
           : CompilerOptions::kDefaultInlineMaxCodeUnits;
@@ -935,8 +768,8 @@
       case kX86_64:
       case kMips:
       case kMips64:
-        parser_options->implicit_null_checks = true;
-        parser_options->implicit_so_checks = true;
+        compiler_options_->implicit_null_checks_ = true;
+        compiler_options_->implicit_so_checks_ = true;
         break;
 
       default:
@@ -944,29 +777,7 @@
         break;
     }
 
-    compiler_options_.reset(new CompilerOptions(parser_options->compiler_filter,
-                                                parser_options->huge_method_threshold,
-                                                parser_options->large_method_threshold,
-                                                parser_options->small_method_threshold,
-                                                parser_options->tiny_method_threshold,
-                                                parser_options->num_dex_methods_threshold,
-                                                parser_options->inline_depth_limit,
-                                                parser_options->inline_max_code_units,
-                                                parser_options->include_patch_information,
-                                                parser_options->top_k_profile_threshold,
-                                                parser_options->debuggable,
-                                                parser_options->generate_debug_info,
-                                                parser_options->implicit_null_checks,
-                                                parser_options->implicit_so_checks,
-                                                parser_options->implicit_suspend_checks,
-                                                parser_options->compile_pic,
-                                                verbose_methods_.empty() ?
-                                                    nullptr :
-                                                    &verbose_methods_,
-                                                new PassManagerOptions(
-                                                    parser_options->pass_manager_options),
-                                                init_failure_output_.get(),
-                                                parser_options->abort_on_hard_verifier_error));
+    compiler_options_->verbose_methods_ = verbose_methods_.empty() ? nullptr : &verbose_methods_;
 
     // Done with usage checks, enable watchdog if requested
     if (parser_options->watch_dog_enabled) {
@@ -977,7 +788,7 @@
     key_value_store_.reset(new SafeMap<std::string, std::string>());
   }
 
-  void InsertCompileOptions(int argc, char** argv, ParserOptions* parser_options) {
+  void InsertCompileOptions(int argc, char** argv) {
     std::ostringstream oss;
     for (int i = 0; i < argc; ++i) {
       if (i > 0) {
@@ -991,10 +802,10 @@
     key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
     key_value_store_->Put(
         OatHeader::kPicKey,
-        parser_options->compile_pic ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+        compiler_options_->compile_pic_ ? OatHeader::kTrueValue : OatHeader::kFalseValue);
     key_value_store_->Put(
         OatHeader::kDebuggableKey,
-        parser_options->debuggable ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+        compiler_options_->debuggable_ ? OatHeader::kTrueValue : OatHeader::kFalseValue);
   }
 
   // Parse the arguments from the command line. In case of an unrecognized option or impossible
@@ -1015,6 +826,7 @@
     }
 
     std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
+    compiler_options_.reset(new CompilerOptions());
 
     for (int i = 0; i < argc; i++) {
       const StringPiece option(argv[i]);
@@ -1072,24 +884,11 @@
         ParseInstructionSetFeatures(option, parser_options.get());
       } else if (option.starts_with("--compiler-backend=")) {
         ParseCompilerBackend(option, parser_options.get());
-      } else if (option.starts_with("--compiler-filter=")) {
-        parser_options->compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
-      } else if (option == "--compile-pic") {
-        parser_options->compile_pic = true;
-      } else if (option.starts_with("--huge-method-max=")) {
-        ParseHugeMethodMax(option, parser_options.get());
-      } else if (option.starts_with("--large-method-max=")) {
-        ParseLargeMethodMax(option, parser_options.get());
-      } else if (option.starts_with("--small-method-max=")) {
-        ParseSmallMethodMax(option, parser_options.get());
-      } else if (option.starts_with("--tiny-method-max=")) {
-        ParseTinyMethodMax(option, parser_options.get());
-      } else if (option.starts_with("--num-dex-methods=")) {
-        ParseNumDexMethods(option, parser_options.get());
-      } else if (option.starts_with("--inline-depth-limit=")) {
-        ParseInlineDepthLimit(option, parser_options.get());
-      } else if (option.starts_with("--inline-max-code-units=")) {
-        ParseInlineMaxCodeUnits(option, parser_options.get());
+      } else if (option.starts_with("--profile-file=")) {
+        profile_file_ = option.substr(strlen("--profile-file=")).data();
+        VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
+      } else if (option == "--no-profile-file") {
+        // No profile
       } else if (option == "--host") {
         is_host_ = true;
       } else if (option == "--runtime-arg") {
@@ -1110,52 +909,16 @@
         dump_cfg_append_ = true;
       } else if (option == "--dump-stats") {
         dump_stats_ = true;
-      } else if (option == "--generate-debug-info" || option == "-g") {
-        parser_options->generate_debug_info = true;
-      } else if (option == "--no-generate-debug-info") {
-        parser_options->generate_debug_info = false;
-      } else if (option == "--debuggable") {
-        parser_options->debuggable = true;
-        parser_options->generate_debug_info = true;
-      } else if (option.starts_with("--profile-file=")) {
-        profile_file_ = option.substr(strlen("--profile-file=")).data();
-        VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
-      } else if (option == "--no-profile-file") {
-        // No profile
-      } else if (option.starts_with("--top-k-profile-threshold=")) {
-        ParseDouble(option.data(), '=', 0.0, 100.0, &parser_options->top_k_profile_threshold);
-      } else if (option == "--print-pass-names") {
-        parser_options->pass_manager_options.SetPrintPassNames(true);
-      } else if (option.starts_with("--disable-passes=")) {
-        ParseDisablePasses(option, parser_options.get());
-      } else if (option.starts_with("--print-passes=")) {
-        ParsePrintPasses(option, parser_options.get());
-      } else if (option == "--print-all-passes") {
-        parser_options->pass_manager_options.SetPrintAllPasses();
-      } else if (option.starts_with("--dump-cfg-passes=")) {
-        ParseDumpCfgPasses(option, parser_options.get());
-      } else if (option == "--print-pass-options") {
-        parser_options->pass_manager_options.SetPrintPassOptions(true);
-      } else if (option.starts_with("--pass-options=")) {
-        ParsePassOptions(option, parser_options.get());
-      } else if (option == "--include-patch-information") {
-        parser_options->include_patch_information = true;
-      } else if (option == "--no-include-patch-information") {
-        parser_options->include_patch_information = false;
+      } else if (option.starts_with("--swap-file=")) {
+        swap_file_name_ = option.substr(strlen("--swap-file=")).data();
+      } else if (option.starts_with("--swap-fd=")) {
+        ParseSwapFd(option);
       } else if (option.starts_with("--verbose-methods=")) {
         // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
         //       conditional on having verbose methods.
         gLogVerbosity.compiler = false;
         Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
-      } else if (option.starts_with("--dump-init-failures=")) {
-        ParseDumpInitFailures(option);
-      } else if (option.starts_with("--swap-file=")) {
-        swap_file_name_ = option.substr(strlen("--swap-file=")).data();
-      } else if (option.starts_with("--swap-fd=")) {
-        ParseSwapFd(option);
-      } else if (option == "--abort-on-hard-verifier-error") {
-        parser_options->abort_on_hard_verifier_error = true;
-      } else {
+      } else if (!compiler_options_->ParseCompilerOption(option, Usage)) {
         Usage("Unknown argument %s", option.data());
       }
     }
@@ -1163,7 +926,7 @@
     ProcessOptions(parser_options.get());
 
     // Insert some compiler things.
-    InsertCompileOptions(argc, argv, parser_options.get());
+    InsertCompileOptions(argc, argv);
   }
 
   // Check whether the oat output file is writable, and open it for later. Also open a swap file,
@@ -1410,6 +1173,7 @@
       ScopedObjectAccess soa(self);
       dex_caches_.push_back(soa.AddLocalReference<jobject>(
           class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
+      dex_file->CreateTypeLookupTable();
     }
 
     // If we use a swap file, ensure we are above the threshold to make it necessary.
@@ -1495,6 +1259,7 @@
                                      swap_fd_,
                                      profile_file_));
 
+    driver_->SetDexFilesForOatFile(dex_files_);
     driver_->CompileAll(class_loader, dex_files_, timings_);
   }
 
@@ -2005,7 +1770,6 @@
   std::string profile_file_;  // Profile file to use
   TimingLogger* timings_;
   std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
-  std::unique_ptr<std::ostream> init_failure_output_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
 };
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 09d7311..1fdffe3 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -47,6 +47,7 @@
   dex_file_verifier.cc \
   dex_instruction.cc \
   elf_file.cc \
+  fault_handler.cc \
   gc/allocation_record.cc \
   gc/allocator/dlmalloc.cc \
   gc/allocator/rosalloc.cc \
@@ -162,6 +163,7 @@
   os_linux.cc \
   parsed_options.cc \
   primitive.cc \
+  profiler.cc \
   quick_exception_handler.cc \
   quick/inline_method_analyser.cc \
   reference_table.cc \
@@ -176,8 +178,7 @@
   thread_pool.cc \
   trace.cc \
   transaction.cc \
-  profiler.cc \
-  fault_handler.cc \
+  type_lookup_table.cc \
   utf.cc \
   utils.cc \
   verifier/dex_gc_map.cc \
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index be5a15e..9ccabad 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1437,7 +1437,107 @@
 ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
 
 // Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+    // Fast path rosalloc allocation.
+    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+    // x2-x7: free.
+    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
+                                                              // Load the class (x2)
+    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+    cbz    x2, .Lart_quick_alloc_object_rosalloc_slow_path    // Check null class
+                                                              // Check class status.
+    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
+    cmp    x3, #MIRROR_CLASS_STATUS_INITIALIZED
+    bne    .Lart_quick_alloc_object_rosalloc_slow_path
+                                                              // Add a fake dependence from the
+                                                              // following access flag and size
+                                                              // loads to the status load.
+                                                              // This is to prevent those loads
+                                                              // from being reordered above the
+                                                              // status load and reading wrong
+                                                              // values (an alternative is to use
+                                                              // a load-acquire for the status).
+    eor    x3, x3, x3
+    add    x2, x2, x3
+                                                              // Check access flags has
+                                                              // kAccClassIsFinalizable
+    ldr    w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+    tst    x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+    bne    .Lart_quick_alloc_object_rosalloc_slow_path
+    ldr    x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]  // Check if the thread local
+                                                              // allocation stack has room.
+                                                              // ldp won't work due to large offset.
+    ldr    x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+    cmp    x3, x4
+    bhs    .Lart_quick_alloc_object_rosalloc_slow_path
+    ldr    w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET]         // Load the object size (x3)
+    cmp    x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a thread
+                                                              // local allocation
+    bhs    .Lart_quick_alloc_object_rosalloc_slow_path
+                                                              // Compute the rosalloc bracket index
+                                                              // from the size.
+                                                              // Align up the size by the rosalloc
+                                                              // bracket quantum size and divide
+                                                              // by the quantum size and subtract
+                                                              // 1. This code is a shorter but
+                                                              // equivalent version.
+    sub    x3, x3, #1
+    lsr    x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+                                                              // Load the rosalloc run (x4)
+    add    x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
+    ldr    x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
+                                                              // Load the free list head (x3). This
+                                                              // will be the return val.
+    ldr    x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+    cbz    x3, .Lart_quick_alloc_object_rosalloc_slow_path
+    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
+    ldr    x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET]               // Load the next pointer of the head
+                                                              // and update the list head with the
+                                                              // next pointer.
+    str    x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+                                                              // Store the class pointer in the
+                                                              // header. This also overwrites the
+                                                              // next pointer. The offsets are
+                                                              // asserted to match.
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+    POISON_HEAP_REF w2
+    str    w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
+                                                              // Push the new object onto the thread
+                                                              // local allocation stack and
+                                                              // increment the thread local
+                                                              // allocation stack top.
+    ldr    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+    str    w3, [x1], #COMPRESSED_REFERENCE_SIZE               // (Increment x1 as a side effect.)
+    str    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+                                                              // Decrement the size of the free list
+    ldr    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+    sub    x1, x1, #1
+                                                              // TODO: consider combining this store
+                                                              // and the list head store above using
+                                                              // strd.
+    str    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+                                                              // Fence. This is "ish" not "ishst" so
+                                                              // that the code after this allocation
+                                                              // site will see the right values in
+                                                              // the fields of the class.
+                                                              // Alternatively we could use "ishst"
+                                                              // if we use load-acquire for the
+                                                              // class status load.
+    dmb    ish
+    mov    x0, x3                                             // Set the return value and return.
+    ret
+.Lart_quick_alloc_object_rosalloc_slow_path:
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME      // save callee saves in case of GC
+    mov    x2, xSELF                       // pass Thread::Current
+    bl     artAllocObjectFromCodeRosAlloc  // (uint32_t type_idx, Method* method, Thread*)
+    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_rosalloc
 
     /*
      * Called by managed code when the thread has been asked to suspend.
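
The bracket-index shortcut documented in the art_quick_alloc_object_rosalloc comments above ("align the size up to the bracket quantum, divide by the quantum, subtract 1") is equivalent to the shorter "(size - 1) >> shift" implemented by the sub/lsr pair. A minimal C++ sketch of that equivalence; kQuantumShift is an illustrative value standing in for ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT, not ART's actual constant:

#include <cassert>
#include <cstdint>

// Illustrative quantum; ART's real value comes from ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT.
constexpr uint32_t kQuantumShift = 4;             // quantum = 16 bytes
constexpr uint32_t kQuantum = 1u << kQuantumShift;

// Long form: round the size up to a quantum multiple, divide by the quantum, subtract 1.
uint32_t BracketIndexLong(uint32_t size) {
  return ((size + kQuantum - 1) / kQuantum) - 1;
}

// Short form matching the assembly: (size - 1) >> shift.
uint32_t BracketIndexShort(uint32_t size) {
  return (size - 1) >> kQuantumShift;
}

int main() {
  for (uint32_t size = 1; size <= 4096; ++size) {
    assert(BracketIndexLong(size) == BracketIndexShort(size));
  }
  return 0;
}
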
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index fe0afa6..c1279bf 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -163,18 +163,13 @@
     return dex_method_idx;
   }
   const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_);
-  const DexFile::StringId* other_descriptor =
-      other_dexfile.FindStringId(mid_declaring_class_descriptor);
-  if (other_descriptor != nullptr) {
-    const DexFile::TypeId* other_type_id =
-        other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor));
-    if (other_type_id != nullptr) {
-      const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(
-          *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_),
-          other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_));
-      if (other_mid != nullptr) {
-        return other_dexfile.GetIndexForMethodId(*other_mid);
-      }
+  const DexFile::TypeId* other_type_id = other_dexfile.FindTypeId(mid_declaring_class_descriptor);
+  if (other_type_id != nullptr) {
+    const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(
+        *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_),
+        other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_));
+    if (other_mid != nullptr) {
+      return other_dexfile.GetIndexForMethodId(*other_mid);
     }
   }
   return DexFile::kDexNoIndex;
@@ -417,6 +412,13 @@
     if (class_linker->IsQuickResolutionStub(existing_entry_point)) {
       // We are running the generic jni stub, but the entry point of the method has not
       // been updated yet.
+      DCHECK_EQ(pc, 0u) << "Should be a downcall";
+      DCHECK(IsNative());
+      return nullptr;
+    }
+    if (existing_entry_point == GetQuickInstrumentationEntryPoint()) {
+      // We are running the generic jni stub, but the method is being instrumented.
+      DCHECK_EQ(pc, 0u) << "Should be a downcall";
       DCHECK(IsNative());
       return nullptr;
     }
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 69d0799..5de1cac 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -616,10 +616,7 @@
   // initialized.
   {
     const DexFile& dex_file = java_lang_Object->GetDexFile();
-    const DexFile::StringId* void_string_id = dex_file.FindStringId("V");
-    CHECK(void_string_id != nullptr);
-    uint32_t void_string_index = dex_file.GetIndexForStringId(*void_string_id);
-    const DexFile::TypeId* void_type_id = dex_file.FindTypeId(void_string_index);
+    const DexFile::TypeId* void_type_id = dex_file.FindTypeId("V");
     CHECK(void_type_id != nullptr);
     uint16_t void_type_idx = dex_file.GetIndexForTypeId(*void_type_id);
     // Now we resolve void type so the dex cache contains it. We use java.lang.Object class
@@ -2740,17 +2737,13 @@
   for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
     mirror::DexCache* dex_cache = dex_caches->Get(i);
     const DexFile* dex_file = dex_cache->GetDexFile();
-    // Try binary searching the string/type index.
-    const DexFile::StringId* string_id = dex_file->FindStringId(descriptor);
-    if (string_id != nullptr) {
-      const DexFile::TypeId* type_id =
-          dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
-      if (type_id != nullptr) {
-        uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
-        mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
-        if (klass != nullptr) {
-          return klass;
-        }
+    // Try binary searching the type index by descriptor.
+    const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
+    if (type_id != nullptr) {
+      uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
+      mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
+      if (klass != nullptr) {
+        return klass;
       }
     }
   }
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 0926ce3..04b8900 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1032,9 +1032,7 @@
   mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
   ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
   ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", sizeof(void*));
-  const DexFile::StringId* string_id = dex_file->FindStringId("LStaticsFromCode;");
-  ASSERT_TRUE(string_id != nullptr);
-  const DexFile::TypeId* type_id = dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
+  const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
   ASSERT_TRUE(type_id != nullptr);
   uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
   mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, soa.Self(), true, false);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b17b76e..7117be9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -69,29 +69,26 @@
   return alloc_record_count;
 }
 
-class Breakpoint {
+class Breakpoint : public ValueObject {
  public:
-  Breakpoint(ArtMethod* method, uint32_t dex_pc,
-             DeoptimizationRequest::Kind deoptimization_kind)
-    SHARED_REQUIRES(Locks::mutator_lock_)
-    : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
+  Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
+    : method_(method),
+      dex_pc_(dex_pc),
+      deoptimization_kind_(deoptimization_kind) {
     CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
           deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
           deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    method_ = soa.EncodeMethod(method);
   }
 
   Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
-    : method_(nullptr), dex_pc_(other.dex_pc_),
-      deoptimization_kind_(other.deoptimization_kind_) {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    method_ = soa.EncodeMethod(other.Method());
-  }
+    : method_(other.method_),
+      dex_pc_(other.dex_pc_),
+      deoptimization_kind_(other.deoptimization_kind_) {}
 
-  ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    return soa.DecodeMethod(method_);
+  // Method() is called from root visiting; do not use ScopedObjectAccess here, or it can cause
+  // GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
+  ArtMethod* Method() const {
+    return method_;
   }
 
   uint32_t DexPc() const {
@@ -104,7 +101,7 @@
 
  private:
   // The location of this breakpoint.
-  jmethodID method_;
+  ArtMethod* method_;
   uint32_t dex_pc_;
 
   // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index ae62e2b..3a93aac 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -37,6 +37,7 @@
 #include "dex_file-inl.h"
 #include "dex_file_verifier.h"
 #include "globals.h"
+#include "handle_scope-inl.h"
 #include "leb128.h"
 #include "mirror/field.h"
 #include "mirror/method.h"
@@ -44,8 +45,8 @@
 #include "os.h"
 #include "reflection.h"
 #include "safe_map.h"
-#include "handle_scope-inl.h"
 #include "thread.h"
+#include "type_lookup_table.h"
 #include "utf-inl.h"
 #include "utils.h"
 #include "well_known_classes.h"
@@ -414,11 +415,19 @@
       method_ids_(reinterpret_cast<const MethodId*>(base + header_->method_ids_off_)),
       proto_ids_(reinterpret_cast<const ProtoId*>(base + header_->proto_ids_off_)),
       class_defs_(reinterpret_cast<const ClassDef*>(base + header_->class_defs_off_)),
-      find_class_def_misses_(0),
-      class_def_index_(nullptr),
       oat_dex_file_(oat_dex_file) {
   CHECK(begin_ != nullptr) << GetLocation();
   CHECK_GT(size_, 0U) << GetLocation();
+  const uint8_t* lookup_data = (oat_dex_file != nullptr)
+      ? oat_dex_file->GetLookupTableData()
+      : nullptr;
+  if (lookup_data != nullptr) {
+    if (lookup_data + TypeLookupTable::RawDataLength(*this) > oat_dex_file->GetOatFile()->End()) {
+      LOG(WARNING) << "found truncated lookup table in " << GetLocation();
+    } else {
+      lookup_table_.reset(TypeLookupTable::Open(lookup_data, *this));
+    }
+  }
 }
 
 DexFile::~DexFile() {
@@ -426,8 +435,6 @@
   // that's only called after DetachCurrentThread, which means there's no JNIEnv. We could
   // re-attach, but cleaning up these global references is not obviously useful. It's not as if
   // the global reference table is otherwise empty!
-  // Remove the index if one were created.
-  delete class_def_index_.LoadRelaxed();
 }
 
 bool DexFile::Init(std::string* error_msg) {
@@ -477,51 +484,26 @@
 
 const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor, size_t hash) const {
   DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
-  // If we have an index lookup the descriptor via that as its constant time to search.
-  Index* index = class_def_index_.LoadSequentiallyConsistent();
-  if (index != nullptr) {
-    auto it = index->FindWithHash(descriptor, hash);
-    return (it == index->end()) ? nullptr : it->second;
+  if (LIKELY(lookup_table_ != nullptr)) {
+    const uint32_t class_def_idx = lookup_table_->Lookup(descriptor, hash);
+    return (class_def_idx != DexFile::kDexNoIndex) ? &GetClassDef(class_def_idx) : nullptr;
   }
+
   // Fast path for the rare no class defs case.
-  uint32_t num_class_defs = NumClassDefs();
+  const uint32_t num_class_defs = NumClassDefs();
   if (num_class_defs == 0) {
     return nullptr;
   }
-  // Search for class def with 2 binary searches and then a linear search.
-  const StringId* string_id = FindStringId(descriptor);
-  if (string_id != nullptr) {
-    const TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
-    if (type_id != nullptr) {
-      uint16_t type_idx = GetIndexForTypeId(*type_id);
-      for (size_t i = 0; i < num_class_defs; ++i) {
-        const ClassDef& class_def = GetClassDef(i);
-        if (class_def.class_idx_ == type_idx) {
-          return &class_def;
-        }
+  const TypeId* type_id = FindTypeId(descriptor);
+  if (type_id != nullptr) {
+    uint16_t type_idx = GetIndexForTypeId(*type_id);
+    for (size_t i = 0; i < num_class_defs; ++i) {
+      const ClassDef& class_def = GetClassDef(i);
+      if (class_def.class_idx_ == type_idx) {
+        return &class_def;
       }
     }
   }
-  // A miss. If we've had kMaxFailedDexClassDefLookups misses then build an index to speed things
-  // up. This isn't done eagerly at construction as construction is not performed in multi-threaded
-  // sections of tools like dex2oat. If we're lazy we hopefully increase the chance of balancing
-  // out which thread builds the index.
-  const uint32_t kMaxFailedDexClassDefLookups = 100;
-  uint32_t old_misses = find_class_def_misses_.FetchAndAddSequentiallyConsistent(1);
-  if (old_misses == kMaxFailedDexClassDefLookups) {
-    // Are we the ones moving the miss count past the max? Sanity check the index doesn't exist.
-    CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
-    // Build the index.
-    index = new Index();
-    for (uint32_t i = 0; i < num_class_defs;  ++i) {
-      const ClassDef& class_def = GetClassDef(i);
-      const char* class_descriptor = GetClassDescriptor(class_def);
-      index->Insert(std::make_pair(class_descriptor, &class_def));
-    }
-    // Sanity check the index still doesn't exist, only 1 thread should build it.
-    CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
-    class_def_index_.StoreSequentiallyConsistent(index);
-  }
   return nullptr;
 }
 
@@ -625,6 +607,26 @@
   return nullptr;
 }
 
+const DexFile::TypeId* DexFile::FindTypeId(const char* string) const {
+  int32_t lo = 0;
+  int32_t hi = NumTypeIds() - 1;
+  while (hi >= lo) {
+    int32_t mid = (hi + lo) / 2;
+    const TypeId& type_id = GetTypeId(mid);
+    const DexFile::StringId& str_id = GetStringId(type_id.descriptor_idx_);
+    const char* str = GetStringData(str_id);
+    int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
+    if (compare > 0) {
+      lo = mid + 1;
+    } else if (compare < 0) {
+      hi = mid - 1;
+    } else {
+      return &type_id;
+    }
+  }
+  return nullptr;
+}
+
 const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t length) const {
   int32_t lo = 0;
   int32_t hi = NumStringIds() - 1;
@@ -697,6 +699,10 @@
   return nullptr;
 }
 
+void DexFile::CreateTypeLookupTable() const {
+  lookup_table_.reset(TypeLookupTable::Create(*this));
+}
+
 // Given a signature place the type ids into the given vector
 bool DexFile::CreateTypeList(const StringPiece& signature, uint16_t* return_type_idx,
                              std::vector<uint16_t>* param_type_idxs) const {
@@ -732,11 +738,7 @@
     }
     // TODO: avoid creating a std::string just to get a 0-terminated char array
     std::string descriptor(signature.data() + start_offset, offset - start_offset);
-    const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
-    if (string_id == nullptr) {
-      return false;
-    }
-    const DexFile::TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
+    const DexFile::TypeId* type_id = FindTypeId(descriptor.c_str());
     if (type_id == nullptr) {
       return false;
     }
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 47e5c12..e7877b2 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -51,6 +51,7 @@
 class Signature;
 template<class T> class Handle;
 class StringPiece;
+class TypeLookupTable;
 class ZipArchive;
 
 // TODO: move all of the macro functionality into the DexCache class.
@@ -532,6 +533,8 @@
   // Looks up a string id for a given modified utf8 string.
   const StringId* FindStringId(const char* string) const;
 
+  const TypeId* FindTypeId(const char* string) const;
+
   // Looks up a string id for a given utf16 string.
   const StringId* FindStringId(const uint16_t* string, size_t length) const;
 
@@ -1139,6 +1142,12 @@
     return oat_dex_file_;
   }
 
+  TypeLookupTable* GetTypeLookupTable() const {
+    return lookup_table_.get();
+  }
+
+  void CreateTypeLookupTable() const;
+
  private:
   // Opens a .dex file
   static std::unique_ptr<const DexFile> OpenFile(int fd, const char* location,
@@ -1237,44 +1246,11 @@
   // Points to the base of the class definition list.
   const ClassDef* const class_defs_;
 
-  // Number of misses finding a class def from a descriptor.
-  mutable Atomic<uint32_t> find_class_def_misses_;
-
-  struct UTF16EmptyFn {
-    void MakeEmpty(std::pair<const char*, const ClassDef*>& pair) const {
-      pair.first = nullptr;
-      pair.second = nullptr;
-    }
-    bool IsEmpty(const std::pair<const char*, const ClassDef*>& pair) const {
-      if (pair.first == nullptr) {
-        DCHECK(pair.second == nullptr);
-        return true;
-      }
-      return false;
-    }
-  };
-  struct UTF16HashCmp {
-    // Hash function.
-    size_t operator()(const char* key) const {
-      return ComputeModifiedUtf8Hash(key);
-    }
-    // std::equal function.
-    bool operator()(const char* a, const char* b) const {
-      return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(a, b) == 0;
-    }
-  };
-  using Index = HashMap<const char*,
-                        const ClassDef*,
-                        UTF16EmptyFn,
-                        UTF16HashCmp,
-                        UTF16HashCmp,
-                        std::allocator<std::pair<const char*, const ClassDef*>>>;
-  mutable Atomic<Index*> class_def_index_;
-
   // If this dex file was loaded from an oat file, oat_dex_file_ contains a
   // pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is
   // null.
   const OatDexFile* oat_dex_file_;
+  mutable std::unique_ptr<TypeLookupTable> lookup_table_;
 
   friend class DexFileVerifierTest;
 };
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 90b35a3..0a167bb 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -297,6 +297,7 @@
     ASSERT_TRUE(type_str_id != nullptr);
     uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
     const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
+    ASSERT_EQ(type_id, java_lang_dex_file_->FindTypeId(type_str));
     ASSERT_TRUE(type_id != nullptr);
     EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id), i);
   }
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 006d2c7..3be7181 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -46,7 +46,7 @@
       DCHECK(Test(obj));
       return true;
     }
-  } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word, old_word | mask));
+  } while (!atomic_entry->CompareExchangeWeakRelaxed(old_word, old_word | mask));
   DCHECK(Test(obj));
   return false;
 }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e433b8d..20e775c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -339,9 +339,7 @@
         << thread->GetState() << " thread " << thread << " self " << self;
     // If thread is a running mutator, then act on behalf of the garbage collector.
     // See the code in ThreadList::RunCheckpoint.
-    if (thread->GetState() == kRunnable) {
-      concurrent_copying_->GetBarrier().Pass(self);
-    }
+    concurrent_copying_->GetBarrier().Pass(self);
   }
 
  private:
@@ -514,9 +512,7 @@
     thread->SetIsGcMarking(false);
     // If thread is a running mutator, then act on behalf of the garbage collector.
     // See the code in ThreadList::RunCheckpoint.
-    if (thread->GetState() == kRunnable) {
-      concurrent_copying_->GetBarrier().Pass(self);
-    }
+    concurrent_copying_->GetBarrier().Pass(self);
   }
 
  private:
@@ -937,9 +933,7 @@
     }
     // If thread is a running mutator, then act on behalf of the garbage collector.
     // See the code in ThreadList::RunCheckpoint.
-    if (thread->GetState() == kRunnable) {
-      concurrent_copying_->GetBarrier().Pass(self);
-    }
+    concurrent_copying_->GetBarrier().Pass(self);
   }
 
  private:
@@ -1670,7 +1664,7 @@
       // It was updated by the mutator.
       break;
     }
-  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+  } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
       false, false, kVerifyNone>(offset, expected_ref, new_ref));
 }
 
@@ -1695,7 +1689,7 @@
         // It was updated by the mutator.
         break;
       }
-    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
   }
 }
 
@@ -1716,7 +1710,7 @@
         // It was updated by the mutator.
         break;
       }
-    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
   }
 }
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 77a288b..db516a0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1146,9 +1146,7 @@
     }
     // If thread is a running mutator, then act on behalf of the garbage collector.
     // See the code in ThreadList::RunCheckpoint.
-    if (thread->GetState() == kRunnable) {
-      mark_sweep_->GetBarrier().Pass(self);
-    }
+    mark_sweep_->GetBarrier().Pass(self);
   }
 
  private:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1d38525..ab93142 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1291,9 +1291,7 @@
     ATRACE_END();
     // If thread is a running mutator, then act on behalf of the trim thread.
     // See the code in ThreadList::RunCheckpoint.
-    if (thread->GetState() == kRunnable) {
-      barrier_->Pass(Thread::Current());
-    }
+    barrier_->Pass(Thread::Current());
   }
 
  private:
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e73ba82..1f89f9b 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,7 +43,7 @@
 class Jit {
  public:
   static constexpr bool kStressMode = kIsDebugBuild;
-  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 1000;
+  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 500;
   static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2;
 
   virtual ~Jit();
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2596dd9..cfccec8 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -57,9 +57,9 @@
     return nullptr;
   }
 
-  // Data cache is 1 / 4 of the map.
+  // Data cache is 1 / 2 of the map.
   // TODO: Make this variable?
-  size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
   size_t code_size = data_map->Size() - data_size;
   uint8_t* divider = data_map->Begin() + data_size;
 
@@ -369,9 +369,7 @@
     DCHECK(thread == Thread::Current() || thread->IsSuspended());
     MarkCodeVisitor visitor(thread, code_cache_);
     visitor.WalkStack();
-    if (thread->GetState() == kRunnable) {
-      barrier_->Pass(Thread::Current());
-    }
+    barrier_->Pass(Thread::Current());
   }
 
  private:
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 2dd953b..8aaa5fa 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -86,7 +86,6 @@
 }
 
 void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t) {
-  ScopedObjectAccessUnchecked soa(self);
   // Since we don't have on-stack replacement, some methods can remain in the interpreter longer
   // than we want resulting in samples even after the method is compiled.
   if (method->IsClassInitializer() || method->IsNative()) {
@@ -126,6 +125,7 @@
                                                           ArtMethod* caller,
                                                           uint32_t dex_pc,
                                                           ArtMethod* callee ATTRIBUTE_UNUSED) {
+  instrumentation_cache_->AddSamples(thread, caller, 1);
   // We make sure we cannot be suspended, as the profiling info can be concurrently deleted.
   thread->StartAssertNoThreadSuspension("Instrumenting invoke");
   DCHECK(this_object != nullptr);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 90180c5..5c12091 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -95,6 +95,12 @@
       OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
 }
 
+inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
+  // Force use of non-transactional mode and do not check.
+  return CasFieldWeakRelease32<false, false>(
+      OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+}
+
 inline uint32_t Object::GetLockOwnerThreadId() {
   return Monitor::GetLockOwnerThreadId(this);
 }
@@ -175,7 +181,10 @@
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
     new_lw = lw;
     new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
-  } while (!CasLockWordWeakSequentiallyConsistent(expected_lw, new_lw));
+    // This CAS is a CAS release so that when GC updates all the fields of an object and then
+    // changes the object from gray to black, the field updates (stores) will be visible (won't be
+    // reordered after this CAS.)
+  } while (!CasLockWordWeakRelease(expected_lw, new_lw));
   return true;
 #elif USE_BROOKS_READ_BARRIER
   DCHECK(kUseBrooksReadBarrier);
@@ -671,6 +680,24 @@
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
+                                          int32_t old_value, int32_t new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
+
+  return atomic_addr->CompareExchangeWeakRelease(old_value, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
                                                            int32_t old_value, int32_t new_value) {
   if (kCheckTransaction) {
@@ -944,6 +971,62 @@
   return success;
 }
 
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
+    MemberOffset field_offset, Object* old_value, Object* new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  if (kVerifyFlags & kVerifyWrites) {
+    VerifyObject(new_value);
+  }
+  if (kVerifyFlags & kVerifyReads) {
+    VerifyObject(old_value);
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+  }
+  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+  bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref.reference_,
+                                                         new_ref.reference_);
+  return success;
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
+    MemberOffset field_offset, Object* old_value, Object* new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  if (kVerifyFlags & kVerifyWrites) {
+    VerifyObject(new_value);
+  }
+  if (kVerifyFlags & kVerifyReads) {
+    VerifyObject(old_value);
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+  }
+  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+  bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref.reference_,
+                                                           new_ref.reference_);
+  return success;
+}
+
 template<bool kIsStatic, typename Visitor>
 inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
   if (!kIsStatic && (ref_offsets != mirror::Class::kClassWalkSuper)) {
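
The new release CAS in the read-barrier pointer update above is justified by the comment: the GC's field stores must not be reordered past the gray-to-black lock-word transition, so a reader that observes "black" also observes the updated fields. This is the standard release/acquire publication pattern; a minimal sketch with std::atomic, using a hypothetical two-state color flag in place of ART's lock-word read-barrier state:

#include <atomic>
#include <cstdint>

struct Obj {
  uint32_t field = 0;                  // payload updated by the GC
  std::atomic<uint32_t> color{0};      // 0 = gray, 1 = black (stand-in for the lock word)
};

void GcUpdateAndBlacken(Obj& obj) {
  obj.field = 42;                      // plain stores to the object's fields
  uint32_t expected = 0;
  // Release CAS: the field store above cannot be reordered after this transition, so a
  // reader that sees color == 1 via an acquire load also sees field == 42.
  while (!obj.color.compare_exchange_weak(expected, 1u, std::memory_order_release,
                                          std::memory_order_relaxed)) {
    expected = 0;
  }
}
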
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f75b8ae..022f31d 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -135,6 +135,8 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
   bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   uint32_t GetLockOwnerThreadId();
 
   mirror::Object* MonitorEnter(Thread* self)
@@ -276,7 +278,6 @@
                                                                    Object* old_value,
                                                                    Object* new_value)
       SHARED_REQUIRES(Locks::mutator_lock_);
-
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
@@ -288,6 +289,18 @@
                                                                      Object* old_value,
                                                                      Object* new_value)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                    Object* old_value,
+                                                    Object* new_value)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                      Object* old_value,
+                                                      Object* new_value)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
@@ -396,6 +409,12 @@
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
+                             int32_t new_value) ALWAYS_INLINE
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
                                               int32_t new_value) ALWAYS_INLINE
       SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index f5a0445..c1284a6 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -307,10 +307,7 @@
   ScopedObjectAccess soa(Thread::Current());
   Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
   ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", sizeof(void*));
-  const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
-  ASSERT_TRUE(string_id != nullptr);
-  const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
-      java_lang_dex_file_->GetIndexForStringId(*string_id));
+  const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId("[I");
   ASSERT_TRUE(type_id != nullptr);
   uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
   Object* array = CheckAndAllocArrayFromCodeInstrumented(
@@ -367,16 +364,10 @@
   Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
   Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
   ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
-  const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
-  ASSERT_TRUE(klass_string_id != nullptr);
-  const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
-      dex_file->GetIndexForStringId(*klass_string_id));
+  const DexFile::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
   ASSERT_TRUE(klass_type_id != nullptr);
 
-  const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;");
-  ASSERT_TRUE(type_string_id != nullptr);
-  const DexFile::TypeId* type_type_id = dex_file->FindTypeId(
-      dex_file->GetIndexForStringId(*type_string_id));
+  const DexFile::TypeId* type_type_id = dex_file->FindTypeId("Ljava/lang/Object;");
   ASSERT_TRUE(type_type_id != nullptr);
 
   const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
diff --git a/runtime/oat.h b/runtime/oat.h
index 276e7f3..5b780c3 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,7 +31,7 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  static constexpr uint8_t kOatVersion[] = { '0', '7', '2', '\0' };
+  static constexpr uint8_t kOatVersion[] = { '0', '7', '3', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a162a4e..680f4ac 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -547,6 +547,25 @@
       return false;
     }
     const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer);
+
+    if (UNLIKELY(oat > End())) {
+      *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
+                                "lookup table offset", GetLocation().c_str(), i,
+                                dex_file_location.c_str());
+      return false;
+    }
+    uint32_t lookup_table_offset = *reinterpret_cast<const uint32_t*>(oat);
+    oat += sizeof(lookup_table_offset);
+    if (Begin() + lookup_table_offset > End()) {
+      *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
+                                "lookup table", GetLocation().c_str(), i,
+                                dex_file_location.c_str());
+      return false;
+    }
+    const uint8_t* lookup_table_data = lookup_table_offset != 0u
+        ? Begin() + lookup_table_offset
+        : nullptr;
+
     const uint32_t* methods_offsets_pointer = reinterpret_cast<const uint32_t*>(oat);
 
     oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_);
@@ -586,6 +605,7 @@
                                               canonical_location,
                                               dex_file_checksum,
                                               dex_file_pointer,
+                                              lookup_table_data,
                                               methods_offsets_pointer,
                                               current_dex_cache_arrays);
     oat_dex_files_storage_.push_back(oat_dex_file);
@@ -709,6 +729,7 @@
                                 const std::string& canonical_dex_file_location,
                                 uint32_t dex_file_location_checksum,
                                 const uint8_t* dex_file_pointer,
+                                const uint8_t* lookup_table_data,
                                 const uint32_t* oat_class_offsets_pointer,
                                 uint8_t* dex_cache_arrays)
     : oat_file_(oat_file),
@@ -716,6 +737,7 @@
       canonical_dex_file_location_(canonical_dex_file_location),
       dex_file_location_checksum_(dex_file_location_checksum),
       dex_file_pointer_(dex_file_pointer),
+      lookup_table_data_(lookup_table_data),
       oat_class_offsets_pointer_(oat_class_offsets_pointer),
       dex_cache_arrays_(dex_cache_arrays) {}
 
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 6acdf86..0a77654 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -400,6 +400,10 @@
     return dex_cache_arrays_;
   }
 
+  const uint8_t* GetLookupTableData() const {
+    return lookup_table_data_;
+  }
+
   ~OatDexFile();
 
  private:
@@ -408,6 +412,7 @@
              const std::string& canonical_dex_file_location,
              uint32_t dex_file_checksum,
              const uint8_t* dex_file_pointer,
+             const uint8_t* lookup_table_data,
              const uint32_t* oat_class_offsets_pointer,
              uint8_t* dex_cache_arrays);
 
@@ -416,6 +421,7 @@
   const std::string canonical_dex_file_location_;
   const uint32_t dex_file_location_checksum_;
   const uint8_t* const dex_file_pointer_;
+  const uint8_t* lookup_table_data_;
   const uint32_t* const oat_class_offsets_pointer_;
   uint8_t* const dex_cache_arrays_;
 
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 53b4f3a..1552318 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -372,9 +372,14 @@
     StackMapEncoding encoding = code_info.ExtractEncoding();
     StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
     const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
-    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
     MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
     uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+    DexRegisterMap vreg_map = IsInInlinedFrame()
+        ? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
+                                             code_info.GetInlineInfoOf(stack_map, encoding),
+                                             encoding,
+                                             number_of_vregs)
+        : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
 
     for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
       if (updated_vregs != nullptr && updated_vregs[vreg]) {
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 85ac4aa..4998a6a 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -63,7 +63,7 @@
       ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
       if (ref != old_ref) {
-        obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+        obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
             offset, old_ref, ref);
       }
     }
@@ -101,7 +101,7 @@
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
       if (ref != old_ref) {
         Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
-        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+        atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
       }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
@@ -140,7 +140,7 @@
       if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
         auto* atomic_root =
             reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
-        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
       }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
diff --git a/runtime/stack.h b/runtime/stack.h
index 1276b24..aa7b616 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -698,6 +698,10 @@
     return current_inlining_depth_ != 0;
   }
 
+  size_t GetCurrentInliningDepth() const {
+    return current_inlining_depth_;
+  }
+
   uintptr_t GetCurrentQuickFramePc() const {
     return cur_quick_frame_pc_;
   }
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index bdd5d10..dcf9601 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -60,8 +60,11 @@
 static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;
 
 ThreadList::ThreadList()
-    : suspend_all_count_(0), debug_suspend_all_count_(0), unregistering_count_(0),
-      suspend_all_historam_("suspend all histogram", 16, 64), long_suspend_(false) {
+    : suspend_all_count_(0),
+      debug_suspend_all_count_(0),
+      unregistering_count_(0),
+      suspend_all_historam_("suspend all histogram", 16, 64),
+      long_suspend_(false) {
   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
 }
 
@@ -195,9 +198,7 @@
       MutexLock mu(self, *Locks::logging_lock_);
       *os_ << local_os.str();
     }
-    if (thread->GetState() == kRunnable) {
-      barrier_.Pass(self);
-    }
+    barrier_.Pass(self);
   }
 
   void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
@@ -285,12 +286,12 @@
     // manually called.
     MutexLock mu(self, *Locks::thread_list_lock_);
     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+    count = list_.size();
     for (const auto& thread : list_) {
       if (thread != self) {
         while (true) {
           if (thread->RequestCheckpoint(checkpoint_function)) {
             // This thread will run its checkpoint some time in the near future.
-            count++;
             break;
           } else {
             // We are probably suspended, try to make sure that we stay suspended.
@@ -383,7 +384,8 @@
 // from-space to to-space refs. Used to synchronize threads at a point
 // to mark the initiation of marking while maintaining the to-space
 // invariant.
-size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
+                                   Closure* flip_callback,
                                    gc::collector::GarbageCollector* collector) {
   TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
   const uint64_t start_time = NanoTime();
@@ -511,7 +513,9 @@
 // Debugger thread might be set to kRunnable for a short period of time after the
 // SuspendAllInternal. This is safe because it will be set back to suspended state before
 // the SuspendAll returns.
-void ThreadList::SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2,
+void ThreadList::SuspendAllInternal(Thread* self,
+                                    Thread* ignore1,
+                                    Thread* ignore2,
                                     bool debug_suspend) {
   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
   Locks::thread_list_lock_->AssertNotHeld(self);
@@ -700,12 +704,14 @@
   VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
 }
 
-static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const char* message,
+static void ThreadSuspendByPeerWarning(Thread* self,
+                                       LogSeverity severity,
+                                       const char* message,
                                        jobject peer) {
   JNIEnvExt* env = self->GetJniEnv();
   ScopedLocalRef<jstring>
-      scoped_name_string(env, (jstring)env->GetObjectField(
-          peer, WellKnownClasses::java_lang_Thread_name));
+      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
+          peer, WellKnownClasses::java_lang_Thread_name)));
   ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
   if (scoped_name_chars.c_str() == nullptr) {
       LOG(severity) << message << ": " << peer;
@@ -715,8 +721,10 @@
   }
 }
 
-Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
-                                        bool debug_suspension, bool* timed_out) {
+Thread* ThreadList::SuspendThreadByPeer(jobject peer,
+                                        bool request_suspension,
+                                        bool debug_suspension,
+                                        bool* timed_out) {
   const uint64_t start_time = NanoTime();
   useconds_t sleep_us = kThreadSuspendInitialSleepUs;
   *timed_out = false;
@@ -813,12 +821,14 @@
   }
 }
 
-static void ThreadSuspendByThreadIdWarning(LogSeverity severity, const char* message,
+static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
+                                           const char* message,
                                            uint32_t thread_id) {
   LOG(severity) << StringPrintf("%s: %d", message, thread_id);
 }
 
-Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
+Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
+                                            bool debug_suspension,
                                             bool* timed_out) {
   const uint64_t start_time = NanoTime();
   useconds_t sleep_us = kThreadSuspendInitialSleepUs;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index c727432..07ea10d 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -55,8 +55,8 @@
 
   // Thread suspension support.
   void ResumeAll()
-      UNLOCK_FUNCTION(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      UNLOCK_FUNCTION(Locks::mutator_lock_);
   void Resume(Thread* thread, bool for_debugger = false)
       REQUIRES(!Locks::thread_suspend_count_lock_);
 
@@ -76,7 +76,8 @@
   // is set to true.
   Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
                               bool* timed_out)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
@@ -84,14 +85,16 @@
   // the thread terminating. Note that as thread ids are recycled this may not suspend the expected
   // thread, that may be terminating. If the suspension times out then *timeout is set to true.
   Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   // Find an already suspended thread (or self) by its id.
   Thread* FindThreadByThreadId(uint32_t thin_lock_id);
 
   // Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside
-  // of the suspend check. Returns how many checkpoints we should expect to run.
+  // of the suspend check. Returns how many checkpoints are expected to run, including for
+  // already suspended threads for b/24191051.
   size_t RunCheckpoint(Closure* checkpoint_function)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
@@ -100,14 +103,17 @@
 
   // Flip thread roots from from-space refs to to-space refs. Used by
   // the concurrent copying collector.
-  size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+  size_t FlipThreadRoots(Closure* thread_flip_visitor,
+                         Closure* flip_callback,
                          gc::collector::GarbageCollector* collector)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   // Suspends all threads
   void SuspendAllForDebugger()
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   void SuspendSelfForDebugger()
@@ -126,10 +132,14 @@
 
   // Add/remove current thread from list.
   void Register(Thread* self)
-      REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(Locks::runtime_shutdown_lock_)
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
-  void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
-                                         !Locks::thread_suspend_count_lock_);
+  void Unregister(Thread* self)
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
 
   void VisitRoots(RootVisitor* visitor) const
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -159,7 +169,9 @@
   void WaitForOtherNonDaemonThreadsToExit()
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
-  void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr,
+  void SuspendAllInternal(Thread* self,
+                          Thread* ignore1,
+                          Thread* ignore2 = nullptr,
                           bool debug_suspend = false)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
@@ -200,8 +212,8 @@
               !Locks::mutator_lock_);
   // No REQUIRES(mutator_lock_) since the unlock function already asserts this.
   ~ScopedSuspendAll()
-      UNLOCK_FUNCTION(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      UNLOCK_FUNCTION(Locks::mutator_lock_);
 };
 
 }  // namespace art
diff --git a/runtime/type_lookup_table.cc b/runtime/type_lookup_table.cc
new file mode 100644
index 0000000..0d40bb7
--- /dev/null
+++ b/runtime/type_lookup_table.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_lookup_table.h"
+
+#include "dex_file-inl.h"
+#include "utf-inl.h"
+#include "utils.h"
+
+#include <memory>
+#include <cstring>
+
+namespace art {
+
+static uint16_t MakeData(uint16_t class_def_idx, uint32_t hash, uint32_t mask) {
+  uint16_t hash_mask = static_cast<uint16_t>(~mask);
+  return (static_cast<uint16_t>(hash) & hash_mask) | class_def_idx;
+}
+
+TypeLookupTable::~TypeLookupTable() {
+  if (!owns_entries_) {
+    // We don't actually own the entries, don't let the unique_ptr release them.
+    entries_.release();
+  }
+}
+
+uint32_t TypeLookupTable::RawDataLength() const {
+  return RawDataLength(dex_file_);
+}
+
+uint32_t TypeLookupTable::RawDataLength(const DexFile& dex_file) {
+  return RoundUpToPowerOfTwo(dex_file.NumClassDefs()) * sizeof(Entry);
+}
+
+TypeLookupTable* TypeLookupTable::Create(const DexFile& dex_file) {
+  const uint32_t num_class_defs = dex_file.NumClassDefs();
+  return (num_class_defs == 0 || num_class_defs > std::numeric_limits<uint16_t>::max())
+      ? nullptr
+      : new TypeLookupTable(dex_file);
+}
+
+TypeLookupTable* TypeLookupTable::Open(const uint8_t* raw_data, const DexFile& dex_file) {
+  return new TypeLookupTable(raw_data, dex_file);
+}
+
+TypeLookupTable::TypeLookupTable(const DexFile& dex_file)
+    : dex_file_(dex_file),
+      mask_(RoundUpToPowerOfTwo(dex_file.NumClassDefs()) - 1),
+      entries_(new Entry[mask_ + 1]),
+      owns_entries_(true) {
+  std::vector<uint16_t> conflict_class_defs;
+  // The first stage. Put elements at their initial positions. If an initial position is already
+  // occupied, delay the insertion of the element to the second stage to reduce the probing
+  // distance.
+  for (size_t i = 0; i < dex_file.NumClassDefs(); ++i) {
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+    const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+    const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+    const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
+    Entry entry;
+    entry.str_offset = str_id.string_data_off_;
+    entry.data = MakeData(i, hash, GetSizeMask());
+    if (!SetOnInitialPos(entry, hash)) {
+      conflict_class_defs.push_back(i);
+    }
+  }
+  // The second stage. The initial position of these elements had a collision. Put these elements
+  // into the nearest free cells and link them together by updating next_pos_delta.
+  for (uint16_t class_def_idx : conflict_class_defs) {
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+    const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+    const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+    const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
+    Entry entry;
+    entry.str_offset = str_id.string_data_off_;
+    entry.data = MakeData(class_def_idx, hash, GetSizeMask());
+    Insert(entry, hash);
+  }
+}
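+
+// Illustration of the two stages above (hypothetical hashes, 4 buckets): if classes A and B both
+// hash to bucket 1 and class C hashes to bucket 2, the first stage places A in slot 1 and C in
+// slot 2 and defers B. The second stage finds the nearest free slot after A's bucket (slot 3),
+// stores B there and sets entries_[1].next_pos_delta = 2 so that Lookup() can walk from A to B;
+// B keeps next_pos_delta = 0, marking the end of the bucket's chain.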
+
+TypeLookupTable::TypeLookupTable(const uint8_t* raw_data, const DexFile& dex_file)
+    : dex_file_(dex_file),
+      mask_(RoundUpToPowerOfTwo(dex_file.NumClassDefs()) - 1),
+      entries_(reinterpret_cast<Entry*>(const_cast<uint8_t*>(raw_data))),
+      owns_entries_(false) {}
+
+bool TypeLookupTable::SetOnInitialPos(const Entry& entry, uint32_t hash) {
+  const uint32_t pos = hash & GetSizeMask();
+  if (!entries_[pos].IsEmpty()) {
+    return false;
+  }
+  entries_[pos] = entry;
+  entries_[pos].next_pos_delta = 0;
+  return true;
+}
+
+void TypeLookupTable::Insert(const Entry& entry, uint32_t hash) {
+  uint32_t pos = FindLastEntryInBucket(hash & GetSizeMask());
+  uint32_t next_pos = (pos + 1) & GetSizeMask();
+  while (!entries_[next_pos].IsEmpty()) {
+    next_pos = (next_pos + 1) & GetSizeMask();
+  }
+  const uint32_t delta = (next_pos >= pos) ? (next_pos - pos) : (next_pos + Size() - pos);
+  entries_[pos].next_pos_delta = delta;
+  entries_[next_pos] = entry;
+  entries_[next_pos].next_pos_delta = 0;
+}
+
+uint32_t TypeLookupTable::FindLastEntryInBucket(uint32_t pos) const {
+  const Entry* entry = &entries_[pos];
+  while (!entry->IsLast()) {
+    pos = (pos + entry->next_pos_delta) & GetSizeMask();
+    entry = &entries_[pos];
+  }
+  return pos;
+}
+
+}  // namespace art
diff --git a/runtime/type_lookup_table.h b/runtime/type_lookup_table.h
new file mode 100644
index 0000000..3c2295c
--- /dev/null
+++ b/runtime/type_lookup_table.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
+#define ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
+
+#include "dex_file.h"
+#include "leb128.h"
+#include "utf.h"
+
+namespace art {
+
+/**
+ * TypeLookupTable is used to find a class_def_idx by class descriptor quickly.
+ * The implementation is based on a hash table.
+ * The table is built at compile time by calling the Create() method and is written into the OAT
+ * file. At runtime the raw data is read back from the memory-mapped file by calling the Open()
+ * method, so the table memory remains clean (its pages are never dirtied).
+ */
+class TypeLookupTable {
+ public:
+  ~TypeLookupTable();
+
+  // Return the number of buckets in the lookup table.
+  uint32_t Size() const {
+    return mask_ + 1;
+  }
+
+  // Search for the class_def_idx by class descriptor and its hash.
+  // Returns DexFile::kDexNoIndex if no matching entry is found.
+  ALWAYS_INLINE uint32_t Lookup(const char* str, uint32_t hash) const {
+    uint32_t pos = hash & GetSizeMask();
+    // Thanks to the special insertion algorithm, the element at position pos is either empty or
+    // the start of a bucket.
+    const Entry* entry = &entries_[pos];
+    while (!entry->IsEmpty()) {
+      if (CmpHashBits(entry->data, hash) && IsStringsEquals(str, entry->str_offset)) {
+        return GetClassDefIdx(entry->data);
+      }
+      if (entry->IsLast()) {
+        return DexFile::kDexNoIndex;
+      }
+      pos = (pos + entry->next_pos_delta) & GetSizeMask();
+      entry = &entries_[pos];
+    }
+    return DexFile::kDexNoIndex;
+  }
+
+  // Create a lookup table for the given dex file.
+  static TypeLookupTable* Create(const DexFile& dex_file);
+
+  // Open a lookup table from existing binary data. The lookup table does not own the binary data.
+  static TypeLookupTable* Open(const uint8_t* raw_data, const DexFile& dex_file);
+
+  // Return a pointer to the raw data of the lookup table. Used by the oat writer.
+  const uint8_t* RawData() const {
+    return reinterpret_cast<const uint8_t*>(entries_.get());
+  }
+
+  // Return the length of the raw data in bytes. Used by the oat writer.
+  uint32_t RawDataLength() const;
+
+  // Return the length in bytes of the raw data for the specified dex file.
+  static uint32_t RawDataLength(const DexFile& dex_file);
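+
+  // Rough usage sketch (hypothetical call sites; the Lookup pattern mirrors
+  // type_lookup_table_test.cc):
+  //
+  //   // Compile time:
+  //   std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(dex_file));
+  //   if (table != nullptr) {
+  //     WriteToOatFile(table->RawData(), table->RawDataLength());  // WriteToOatFile: hypothetical.
+  //   }
+  //
+  //   // Runtime, over the memory-mapped oat data:
+  //   std::unique_ptr<TypeLookupTable> table2(TypeLookupTable::Open(raw_data, dex_file));
+  //   const char* descriptor = "LFoo;";
+  //   size_t hash = ComputeModifiedUtf8Hash(descriptor);
+  //   uint32_t class_def_idx = table2->Lookup(descriptor, hash);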
+
+ private:
+  /**
+   * To find an element we need to compare strings. It is faster to compare hashes first and only
+   * then the strings themselves, but the table does not store the full hash of each element.
+   * Two observations help:
+   * 1. The low bits of the hash are the same for all elements in one bucket.
+   * 2. If the dex file contains N classes and the size of the hash table is 2^n (where N <= 2^n),
+   *    then 16-n bits of the 16-bit data field are free, so part of the element's hash can be
+   *    encoded into them.
+   * The 32-bit hash of an element can therefore be split into three parts (shown for n = 7):
+   * XXXX XXXX XXXX XXXX YYYY YYYY YZZZ ZZZZ
+   * Z - the part of the hash implied by the bucket index (the same for all elements in the
+   *     bucket) - n bits
+   * Y - the part of the hash stored in the free 16-n bits (only n bits are needed for the
+   *     class_def_idx)
+   * X - the part of the hash that cannot be stored without enlarging the entry.
+   * So the data field of an Entry stores the class_def_idx together with part of the entry's hash.
+   */
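+  // Worked example (illustrative numbers only): with 100 class defs the table is rounded up to
+  // 128 buckets, so n = 7 and mask_ = 0x7F. For an element whose hash has low 16 bits 0x1234:
+  //   - the bucket index (Z bits) is 0x1234 & 0x7F = 0x34,
+  //   - MakeData() stores the Y bits, 0x1234 & 0xFF80 = 0x1200, ORed with the class_def_idx,
+  //     which fits in the low 7 bits,
+  //   - CmpHashBits() later compares only the Y bits, so the full string comparison is needed
+  //     only when they match.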
+  struct Entry {
+    uint32_t str_offset;
+    uint16_t data;
+    uint16_t next_pos_delta;
+
+    Entry() : str_offset(0), data(0), next_pos_delta(0) {}
+
+    bool IsEmpty() const {
+      return str_offset == 0;
+    }
+
+    bool IsLast() const {
+      return next_pos_delta == 0;
+    }
+  };
+
+  // Construct from a dex file.
+  explicit TypeLookupTable(const DexFile& dex_file);
+
+  // Construct from a dex file with existing data.
+  TypeLookupTable(const uint8_t* raw_data, const DexFile& dex_file);
+
+  bool IsStringsEquals(const char* str, uint32_t str_offset) const {
+    const uint8_t* ptr = dex_file_.Begin() + str_offset;
+    // Skip string length.
+    DecodeUnsignedLeb128(&ptr);
+    return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(
+        str, reinterpret_cast<const char*>(ptr)) == 0;
+  }
+
+  // Extract the hash bits from the element's data and compare them with
+  // the corresponding bits of the specified hash.
+  bool CmpHashBits(uint32_t data, uint32_t hash) const {
+    uint32_t mask = static_cast<uint16_t>(~GetSizeMask());
+    return (hash & mask) == (data & mask);
+  }
+
+  uint32_t GetClassDefIdx(uint32_t data) const {
+    return data & mask_;
+  }
+
+  uint32_t GetSizeMask() const {
+    return mask_;
+  }
+
+  // Attempt to set an entry in the slot corresponding to its hash. If something is already there,
+  // return false. Otherwise return true.
+  bool SetOnInitialPos(const Entry& entry, uint32_t hash);
+
+  // Insert an entry, probing until there is an empty slot.
+  void Insert(const Entry& entry, uint32_t hash);
+
+  // Find the last entry in a chain.
+  uint32_t FindLastEntryInBucket(uint32_t cur_pos) const;
+
+  const DexFile& dex_file_;
+  const uint32_t mask_;
+  std::unique_ptr<Entry[]> entries_;
+  // owns_entries_ specifies if the lookup table owns the entries_ array.
+  const bool owns_entries_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeLookupTable);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
diff --git a/runtime/type_lookup_table_test.cc b/runtime/type_lookup_table_test.cc
new file mode 100644
index 0000000..7f500cc
--- /dev/null
+++ b/runtime/type_lookup_table_test.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <memory>
+
+#include "common_runtime_test.h"
+#include "dex_file-inl.h"
+#include "scoped_thread_state_change.h"
+#include "type_lookup_table.h"
+#include "utf-inl.h"
+
+namespace art {
+
+class TypeLookupTableTest : public CommonRuntimeTest {
+ public:
+  size_t kDexNoIndex = DexFile::kDexNoIndex;  // Copy the constant to avoid odr-use linker errors in ASSERT_EQ.
+};
+
+TEST_F(TypeLookupTableTest, CreateLookupTable) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+  std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+  ASSERT_NE(nullptr, table.get());
+  ASSERT_NE(nullptr, table->RawData());
+  ASSERT_EQ(32U, table->RawDataLength());
+}
+
+TEST_F(TypeLookupTableTest, FindNonExistingClassWithoutCollisions) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+  std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+  ASSERT_NE(nullptr, table.get());
+  const char* descriptor = "LBA;";
+  size_t hash = ComputeModifiedUtf8Hash(descriptor);
+  uint32_t class_def_idx = table->Lookup(descriptor, hash);
+  ASSERT_EQ(kDexNoIndex, class_def_idx);
+}
+
+TEST_F(TypeLookupTableTest, FindNonExistingClassWithCollisions) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+  std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+  ASSERT_NE(nullptr, table.get());
+  const char* descriptor = "LDA;";
+  size_t hash = ComputeModifiedUtf8Hash(descriptor);
+  uint32_t class_def_idx = table->Lookup(descriptor, hash);
+  ASSERT_EQ(kDexNoIndex, class_def_idx);
+}
+
+TEST_F(TypeLookupTableTest, FindClassNoCollisions) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+  std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+  ASSERT_NE(nullptr, table.get());
+  const char* descriptor = "LC;";
+  size_t hash = ComputeModifiedUtf8Hash(descriptor);
+  uint32_t class_def_idx = table->Lookup(descriptor, hash);
+  ASSERT_EQ(2U, class_def_idx);
+}
+
+TEST_F(TypeLookupTableTest, FindClassWithCollisions) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+  std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+  ASSERT_NE(nullptr, table.get());
+  const char* descriptor = "LAB;";
+  size_t hash = ComputeModifiedUtf8Hash(descriptor);
+  uint32_t class_def_idx = table->Lookup(descriptor, hash);
+  ASSERT_EQ(1U, class_def_idx);
+}
+
+}  // namespace art
diff --git a/runtime/utils.cc b/runtime/utils.cc
index dee4f9c..48dce63 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1835,4 +1835,43 @@
   os << "Something went wrong, didn't find the method in the class data.";
 }
 
+static void ParseStringAfterChar(const std::string& s,
+                                 char c,
+                                 std::string* parsed_value,
+                                 UsageFn Usage) {
+  std::string::size_type colon = s.find(c);
+  if (colon == std::string::npos) {
+    Usage("Missing char %c in option %s\n", c, s.c_str());
+  }
+  // Add one to skip over the separator char itself.
+  *parsed_value = s.substr(colon + 1);
+}
+
+void ParseDouble(const std::string& option,
+                 char after_char,
+                 double min,
+                 double max,
+                 double* parsed_value,
+                 UsageFn Usage) {
+  std::string substring;
+  ParseStringAfterChar(option, after_char, &substring, Usage);
+  bool sane_val = true;
+  double value;
+  if ((false)) {
+    // TODO: this doesn't seem to work on the emulator.  b/15114595
+    std::stringstream iss(substring);
+    iss >> value;
+    // Ensure that we have a value, there was no cruft after it and it satisfies a sensible range.
+    sane_val = iss.eof() && (value >= min) && (value <= max);
+  } else {
+    char* end = nullptr;
+    value = strtod(substring.c_str(), &end);
+    sane_val = *end == '\0' && value >= min && value <= max;
+  }
+  if (!sane_val) {
+    Usage("Invalid double value %s for option %s\n", substring.c_str(), option.c_str());
+  }
+  *parsed_value = value;
+}
+
 }  // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index bd52b68..3690f86 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -26,8 +26,10 @@
 #include <vector>
 
 #include "arch/instruction_set.h"
+#include "base/casts.h"
 #include "base/logging.h"
 #include "base/mutex.h"
+#include "base/stringpiece.h"
 #include "globals.h"
 #include "primitive.h"
 
@@ -35,7 +37,6 @@
 
 namespace art {
 
-class ArtCode;
 class ArtField;
 class ArtMethod;
 class DexFile;
@@ -321,6 +322,34 @@
   return reinterpret_cast<const void*>(code);
 }
 
+using UsageFn = void (*)(const char*, ...);
+
+template <typename T>
+static void ParseUintOption(const StringPiece& option,
+                            const std::string& option_name,
+                            T* out,
+                            UsageFn Usage,
+                            bool is_long_option = true) {
+  std::string option_prefix = option_name + (is_long_option ? "=" : "");
+  DCHECK(option.starts_with(option_prefix));
+  const char* value_string = option.substr(option_prefix.size()).data();
+  int64_t parsed_integer_value = 0;
+  if (!ParseInt(value_string, &parsed_integer_value)) {
+    Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
+  }
+  if (parsed_integer_value < 0) {
+    Usage("%s passed a negative value %d", option_name.c_str(), parsed_integer_value);
+  }
+  *out = dchecked_integral_cast<T>(parsed_integer_value);
+}
+
+void ParseDouble(const std::string& option,
+                 char after_char,
+                 double min,
+                 double max,
+                 double* parsed_value,
+                 UsageFn Usage);
+
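+// Usage sketch (hypothetical option strings, out-variables, and a Usage callback of type UsageFn):
+//
+//   size_t threshold;
+//   ParseUintOption("--num-dex-methods=60", "--num-dex-methods", &threshold, Usage);  // 60
+//
+//   double utilization;
+//   ParseDouble("--utilization:0.75", ':', 0.1, 0.9, &utilization, Usage);  // 0.75
+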
 }  // namespace art
 
 #endif  // ART_RUNTIME_UTILS_H_
diff --git a/test/541-regression-inlined-deopt/expected.txt b/test/541-regression-inlined-deopt/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/541-regression-inlined-deopt/expected.txt
diff --git a/test/541-regression-inlined-deopt/info.txt b/test/541-regression-inlined-deopt/info.txt
new file mode 100644
index 0000000..209588f
--- /dev/null
+++ b/test/541-regression-inlined-deopt/info.txt
@@ -0,0 +1,4 @@
+Regression test for deopt from optimized code which would use the top-level
+stack map for deopting inlined frames. The test case is written in smali for
+full control over vregs; the earlier test 449 happened to pass anyway because
+the vreg maps at the various inlining depths were similar.
diff --git a/test/541-regression-inlined-deopt/smali/TestCase.smali b/test/541-regression-inlined-deopt/smali/TestCase.smali
new file mode 100644
index 0000000..a109775
--- /dev/null
+++ b/test/541-regression-inlined-deopt/smali/TestCase.smali
@@ -0,0 +1,55 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method private static $inline$depth1([I)V
+    .registers 3
+
+    # Expects array in v2.
+
+    const v0, 0x0
+
+    const v1, 0x3
+    aput v0, p0, v1
+
+    const v1, 0x4
+    aput v0, p0, v1
+
+    return-void
+.end method
+
+.method private static $inline$depth0([I)V
+    .registers 1
+
+    # Expects array in v0.
+
+    invoke-static {p0}, LTestCase;->$inline$depth1([I)V
+    return-void
+.end method
+
+.method public static foo()V
+    .registers 10
+
+    # Create a new array short enough to throw AIOOB in $inline$depth1.
+    # Make sure the reference is not stored in the same vreg as used by
+    # the inlined methods.
+
+    const v5, 0x3
+    new-array v6, v5, [I
+
+    invoke-static {v6}, LTestCase;->$inline$depth0([I)V
+    return-void
+.end method
diff --git a/test/541-regression-inlined-deopt/src/Main.java b/test/541-regression-inlined-deopt/src/Main.java
new file mode 100644
index 0000000..fa79590
--- /dev/null
+++ b/test/541-regression-inlined-deopt/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+
+public class Main {
+
+  // Workaround for b/18051191.
+  class InnerClass {}
+
+  public static void main(String[] args) throws Throwable {
+    try {
+      Class<?> c = Class.forName("TestCase");
+      Method m = c.getMethod("foo");
+      m.invoke(null, (Object[]) null);
+    } catch (InvocationTargetException ex) {
+      // Code should have thrown AIOOB.
+      if (!(ex.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+        throw ex;
+      }
+    }
+  }
+}
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
index 06692f9..3946de3 100755
--- a/test/960-default-smali/build
+++ b/test/960-default-smali/build
@@ -20,14 +20,23 @@
 # Generate the smali Main.smali file or fail
 ${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali
 
-if [[ $@ == *"--jvm"* ]]; then
-  # Build the Java files if we are running a --jvm test
-  mkdir -p src
-  mkdir -p classes
-  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
-  ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+USES_JAVA="false"
+if [[ $ARGS == *"--jvm"* ]]; then
+  USES_JAVA="true"
+elif [[ "$USE_JACK" == "true" ]]; then
+  if $JACK -D jack.java.source.version=1.8 >& /dev/null; then
+    USES_JAVA="true"
+  else
+    echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+  fi
 fi
 
-# Build the smali files and make a dex
-${SMALI} -JXmx256m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
-zip "$TEST_NAME.jar" classes.dex
+if [[ "$USES_JAVA" == "true" ]]; then
+  # We are compiling java code, create it.
+  mkdir -p src
+  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+  # Ignore the smali directory.
+  EXTRA_ARGS="--no-smali"
+fi
+
+./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
index 5eb851f..03cc624 100755
--- a/test/961-default-iface-resolution-generated/build
+++ b/test/961-default-iface-resolution-generated/build
@@ -17,8 +17,6 @@
 # make us exit on a failure
 set -e
 
-mkdir -p ./smali
-
 # We will be making more files than the ulimit is set to allow. Remove it temporarily.
 OLD_ULIMIT=`ulimit -S`
 ulimit -S unlimited
@@ -28,20 +26,31 @@
 }
 trap 'restore_ulimit' ERR
 
+mkdir -p ./smali
+
 # Generate the smali files and expected.txt or fail
 ./util-src/generate_smali.py ./smali ./expected.txt
 
-if [[ $@ == *"--jvm"* ]]; then
-  # Build the Java files if we are running a --jvm test
-  mkdir -p src
-  mkdir -p classes
-  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
-  ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+USES_JAVA="false"
+if [[ $ARGS == *"--jvm"* ]]; then
+  USES_JAVA="true"
+elif [[ $USE_JACK == "true" ]]; then
+  if "$JACK" -D jack.java.source.version=1.8 >& /dev/null; then
+    USES_JAVA="true"
+  else
+    echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+  fi
 fi
 
-# Build the smali files and make a dex
-${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
-zip $TEST_NAME.jar classes.dex
+if [[ "$USES_JAVA" == "true" ]]; then
+  # We are compiling java code, create it.
+  mkdir -p src
+  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+  # Ignore the smali directory.
+  EXTRA_ARGS="--no-smali"
+fi
+
+./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
 
 # Reset the ulimit back to its initial value
 restore_ulimit
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
index 06bb3bd..24e2feb 100755
--- a/test/962-iface-static/build
+++ b/test/962-iface-static/build
@@ -17,14 +17,23 @@
 # make us exit on a failure
 set -e
 
+USES_JAVA="false"
 if [[ $@ == *"--jvm"* ]]; then
-  # Build the Java files if we are running a --jvm test
-  mkdir -p src
-  mkdir -p classes
-  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
-  ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+  USES_JAVA="true"
+elif [[ "$USE_JACK" == "true" ]]; then
+  if $JACK -D jack.java.source.version=1.8 2>/dev/null; then
+    USES_JAVA="true"
+  else
+    echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+  fi
 fi
 
-# Build the smali files and make a dex
-${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
-zip $TEST_NAME.jar classes.dex
+if [[ "$USES_JAVA" == "true" ]]; then
+  # We are compiling java code, create it.
+  mkdir -p src
+  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+  # Ignore the smali directory.
+  EXTRA_ARGS="--no-smali"
+fi
+
+./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
index 06bb3bd..24e2feb 100755
--- a/test/963-default-range-smali/build
+++ b/test/963-default-range-smali/build
@@ -17,14 +17,23 @@
 # make us exit on a failure
 set -e
 
+USES_JAVA="false"
 if [[ $@ == *"--jvm"* ]]; then
-  # Build the Java files if we are running a --jvm test
-  mkdir -p src
-  mkdir -p classes
-  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
-  ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+  USES_JAVA="true"
+elif [[ "$USE_JACK" == "true" ]]; then
+  if $JACK -D jack.java.source.version=1.8 2>/dev/null; then
+    USES_JAVA="true"
+  else
+    echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+  fi
 fi
 
-# Build the smali files and make a dex
-${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
-zip $TEST_NAME.jar classes.dex
+if [[ "$USES_JAVA" == "true" ]]; then
+  # We are compiling java code, create it.
+  mkdir -p src
+  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+  # Ignore the smali directory.
+  EXTRA_ARGS="--no-smali"
+fi
+
+./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
index b0fbe4b..d916f1b 100755
--- a/test/964-default-iface-init-generated/build
+++ b/test/964-default-iface-init-generated/build
@@ -29,17 +29,26 @@
 # Generate the smali files and expected.txt or fail
 ./util-src/generate_smali.py ./smali ./expected.txt
 
+USES_JAVA="false"
 if [[ $@ == *"--jvm"* ]]; then
-  # Build the Java files if we are running a --jvm test
-  mkdir -p src
-  mkdir -p classes
-  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
-  ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+  USES_JAVA="true"
+elif [[ "$USE_JACK" == "true" ]]; then
+  if $JACK -D jack.java.source.version=1.8 2>/dev/null; then
+    USES_JAVA="true"
+  else
+    echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+  fi
 fi
 
-# Build the smali files and make a dex
-${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
-zip $TEST_NAME.jar classes.dex
+if [[ "$USES_JAVA" == "true" ]]; then
+  # We are compiling java code, create it.
+  mkdir -p src
+  ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+  # Ignore the smali directory.
+  EXTRA_ARGS="--no-smali"
+fi
+
+./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
 
 # Reset the ulimit back to its initial value
 restore_ulimit
diff --git a/test/Lookup/A.java b/test/Lookup/A.java
new file mode 100644
index 0000000..666ba18
--- /dev/null
+++ b/test/Lookup/A.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class A {}
diff --git a/test/Lookup/AB.java b/test/Lookup/AB.java
new file mode 100644
index 0000000..b231708
--- /dev/null
+++ b/test/Lookup/AB.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class AB {}
diff --git a/test/Lookup/C.java b/test/Lookup/C.java
new file mode 100644
index 0000000..5b90069
--- /dev/null
+++ b/test/Lookup/C.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class C {}
diff --git a/test/etc/default-build b/test/etc/default-build
index 4743216..7242428 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -17,8 +17,45 @@
 # Stop if something fails.
 set -e
 
+# Set default values for directories.
+if [ -d smali ]; then
+  HAS_SMALI=true
+else
+  HAS_SMALI=false
+fi
+
+if [ -d src ]; then
+  HAS_SRC=true
+else
+  HAS_SRC=false
+fi
+
+if [ -d src2 ]; then
+  HAS_SRC2=true
+else
+  HAS_SRC2=false
+fi
+
+if [ -d src-multidex ]; then
+  HAS_SRC_MULTIDEX=true
+else
+  HAS_SRC_MULTIDEX=false
+fi
+
+if [ -d src-ex ]; then
+  HAS_SRC_EX=true
+else
+  HAS_SRC_EX=false
+fi
+
 DX_FLAGS=""
 SKIP_DX_MERGER="false"
+EXPERIMENTAL=""
+
+# Setup experimental flag mappings in a bash associative array.
+declare -A JACK_EXPERIMENTAL_ARGS
+JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8"
+JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8"
 
 while true; do
   if [ "x$1" = "x--dx-option" ]; then
@@ -28,6 +65,25 @@
     shift
   elif [ "x$1" = "x--jvm" ]; then
     shift
+  elif [ "x$1" = "x--no-src" ]; then
+    HAS_SRC=false
+    shift
+  elif [ "x$1" = "x--no-src2" ]; then
+    HAS_SRC2=false
+    shift
+  elif [ "x$1" = "x--no-src-multidex" ]; then
+    HAS_SRC_MULTIDEX=false
+    shift
+  elif [ "x$1" = "x--no-src-ex" ]; then
+    HAS_SRC_EX=false
+    shift
+  elif [ "x$1" = "x--no-smali" ]; then
+    HAS_SMALI=false
+    shift
+  elif [ "x$1" = "x--experimental" ]; then
+    shift
+    EXPERIMENTAL="${EXPERIMENTAL} $1"
+    shift
   elif expr "x$1" : "x--" >/dev/null 2>&1; then
     echo "unknown $0 option: $1" 1>&2
     exit 1
@@ -36,17 +92,22 @@
   fi
 done
 
+# Add args from the experimental mappings.
+for experiment in ${EXPERIMENTAL}; do
+  JACK_ARGS="${JACK_ARGS} ${JACK_EXPERIMENTAL_ARGS[${experiment}]}"
+done
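+# For example, when a test's build script invokes this one with "--experimental default-methods"
+# (as the 96x build scripts above now do), the loop above appends
+# "-D jack.java.source.version=1.8" to JACK_ARGS.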
+
 if [ -e classes.dex ]; then
   zip $TEST_NAME.jar classes.dex
   exit 0
 fi
 
-if ! [ -d src ] && ! [ -d src2 ]; then
+if ! [ "${HAS_SRC}" = "true" ] && ! [ "${HAS_SRC2}" = "true" ]; then
   # No src directory? Then forget about trying to run dx.
   SKIP_DX_MERGER="true"
 fi
 
-if [ -d src-multidex ]; then
+if [ "${HAS_SRC_MULTIDEX}" = "true" ]; then
   # Jack does not support this configuration unless we specify how to partition the DEX file
   # with a .jpp file.
   USE_JACK="false"
@@ -54,27 +115,29 @@
 
 if [ ${USE_JACK} = "true" ]; then
   # Jack toolchain
-  if [ -d src ]; then
-    ${JACK} --output-jack src.jack src
+  if [ "${HAS_SRC}" = "true" ]; then
+    ${JACK} ${JACK_ARGS} --output-jack src.jack src
     imported_jack_files="--import src.jack"
   fi
 
-  if [ -d src2 ]; then
-    ${JACK} --output-jack src2.jack src2
+  if [ "${HAS_SRC2}" = "true" ]; then
+    ${JACK} ${JACK_ARGS} --output-jack src2.jack src2
     imported_jack_files="--import src2.jack ${imported_jack_files}"
   fi
 
   # Compile jack files into a DEX file. We set jack.import.type.policy=keep-first to consider
   # class definitions from src2 first.
-  ${JACK} ${imported_jack_files} -D jack.import.type.policy=keep-first --output-dex .
+  if [ "${HAS_SRC}" = "true" ] || [ "${HAS_SRC2}" = "true" ]; then
+    ${JACK} ${JACK_ARGS} ${imported_jack_files} -D jack.import.type.policy=keep-first --output-dex .
+  fi
 else
   # Legacy toolchain with javac+dx
-  if [ -d src ]; then
+  if [ "${HAS_SRC}" = "true" ]; then
     mkdir classes
-    ${JAVAC} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'`
+    ${JAVAC} ${JAVAC_ARGS} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'`
   fi
 
-  if [ -d src-multidex ]; then
+  if [ "${HAS_SRC_MULTIDEX}" = "true" ]; then
     mkdir classes2
     ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'`
     if [ ${NEED_DEX} = "true" ]; then
@@ -83,20 +146,22 @@
     fi
   fi
 
-  if [ -d src2 ]; then
+  if [ "${HAS_SRC2}" = "true" ]; then
     mkdir -p classes
-    ${JAVAC} -d classes `find src2 -name '*.java'`
+    ${JAVAC} ${JAVAC_ARGS} -d classes `find src2 -name '*.java'`
   fi
 
-  if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then
-    ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
-      --dump-width=1000 ${DX_FLAGS} classes
+  if [ "${HAS_SRC}" = "true" ] || [ "${HAS_SRC2}" = "true" ]; then
+    if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then
+      ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
+        --dump-width=1000 ${DX_FLAGS} classes
+    fi
   fi
 fi
 
-if [ -d smali ]; then
+if [ "${HAS_SMALI}" = "true" ]; then
   # Compile Smali classes
-  ${SMALI} -JXmx256m ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
+  ${SMALI} -JXmx512m ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
 
   # Don't bother with dexmerger if we provide our own main function in a smali file.
   if [ ${SKIP_DX_MERGER} = "false" ]; then
@@ -106,18 +171,18 @@
   fi
 fi
 
-if [ -d src-ex ]; then
+if [ ${HAS_SRC_EX} = "true" ]; then
   if [ ${USE_JACK} = "true" ]; then
       # Rename previous "classes.dex" so it is not overwritten.
       mv classes.dex classes-1.dex
       #TODO find another way to append src.jack to the jack classpath
-      ${JACK}:src.jack --output-dex . src-ex
+      ${JACK}:src.jack ${JACK_ARGS} --output-dex . src-ex
       zip $TEST_NAME-ex.jar classes.dex
       # Restore previous "classes.dex" so it can be zipped.
       mv classes-1.dex classes.dex
   else
     mkdir classes-ex
-    ${JAVAC} -d classes-ex -cp classes `find src-ex -name '*.java'`
+    ${JAVAC} ${JAVAC_ARGS} -d classes-ex -cp classes `find src-ex -name '*.java'`
     if [ ${NEED_DEX} = "true" ]; then
       ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \
         --dump-width=1000 ${DX_FLAGS} classes-ex
@@ -133,7 +198,7 @@
 fi
 
 # Create a single jar with two dex files for multidex.
-if [ -d src-multidex ]; then
+if [ ${HAS_SRC_MULTIDEX} = "true" ]; then
   zip $TEST_NAME.jar classes.dex classes2.dex
 elif [ ${NEED_DEX} = "true" ]; then
   zip $TEST_NAME.jar classes.dex
diff --git a/test/run-all-tests b/test/run-all-tests
index 76283b7..6d5c28c 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -44,12 +44,45 @@
     elif [ "x$1" = "x--use-java-home" ]; then
         run_args="${run_args} --use-java-home"
         shift
+    elif [ "x$1" = "x--no-image" ]; then
+        run_args="${run_args} --no-image"
+        shift
+    elif [ "x$1" = "x--quick" ]; then
+        run_args="${run_args} --quick"
+        shift
+    elif [ "x$1" = "x--optimizing" ]; then
+        run_args="${run_args} --optimizing"
+        shift
+    elif [ "x$1" = "x--image" ]; then
+        run_args="${run_args} --image"
+        shift
+    elif [ "x$1" = "x--never-clean" ]; then
+        run_args="${run_args} --never-clean"
+        shift
     elif [ "x$1" = "x--jvm" ]; then
         run_args="${run_args} --jvm"
         shift
     elif [ "x$1" = "x--debug" ]; then
         run_args="${run_args} --debug"
         shift
+    elif [ "x$1" = "x--build-only" ]; then
+        run_args="${run_args} --build-only"
+        shift
+    elif [ "x$1" = "x--build-with-jack" ]; then
+        run_args="${run_args} --build-with-jack"
+        shift
+    elif [ "x$1" = "x--build-with-javac-dx" ]; then
+        run_args="${run_args} --build-with-javac-dx"
+        shift
+    elif [ "x$1" = "x--dex2oat-swap" ]; then
+        run_args="${run_args} --dex2oat-swap"
+        shift
+    elif [ "x$1" = "x--dalvik" ]; then
+        run_args="${run_args} --dalvik"
+        shift
+    elif [ "x$1" = "x--debuggable" ]; then
+        run_args="${run_args} --debuggable"
+        shift
     elif [ "x$1" = "x--zygote" ]; then
         run_args="${run_args} --zygote"
         shift
@@ -59,15 +92,15 @@
     elif [ "x$1" = "x--jit" ]; then
         run_args="${run_args} --jit"
         shift
+    elif [ "x$1" = "x--verify-soft-fail" ]; then
+        run_args="${run_args} --verify-soft-fail"
+        shift
     elif [ "x$1" = "x--no-verify" ]; then
         run_args="${run_args} --no-verify"
         shift
     elif [ "x$1" = "x--no-optimize" ]; then
         run_args="${run_args} --no-optimize"
         shift
-    elif [ "x$1" = "x--valgrind" ]; then
-        run_args="${run_args} --valgrind"
-        shift
     elif [ "x$1" = "x--dev" ]; then
         run_args="${run_args} --dev"
         shift
@@ -116,6 +149,15 @@
     elif [ "x$1" = "x--always-clean" ]; then
         run_args="${run_args} --always-clean"
         shift
+    elif [ "x$1" = "x--pic-test" ]; then
+        run_args="${run_args} --pic-test"
+        shift
+    elif [ "x$1" = "x--pic-image" ]; then
+        run_args="${run_args} --pic-image"
+        shift
+    elif [ "x$1" = "x--strace" ]; then
+        run_args="${run_args} --strace"
+        shift
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
         usage="yes"
@@ -134,9 +176,13 @@
         echo "  Options are all passed to run-test; refer to that for " \
              "further documentation:"
         echo "    --debug --dev --host --interpreter --jit --jvm --no-optimize"
-        echo "    --no-verify -O --update --valgrind --zygote --64 --relocate"
-        echo "    --prebuild --always-clean --gcstress --gcverify --trace"
-        echo "    --no-patchoat --no-dex2oat --use-java-home"
+        echo "    --no-verify --verify-soft-fail -O --update --zygote --64"
+        echo "    --relocate --prebuild --always-clean --gcstress --gcverify"
+        echo "    --trace --no-patchoat --no-dex2oat --use-java-home --pic-image"
+        echo "    --pic-test --strace --debuggable --dalvik --dex2oat-swap"
+        echo "    --build-only --build-with-jack --build-with-javac-dx"
+        echo "    --never-clean --image --no-image --quick --optimizing"
+        echo "    --no-relocate --no-prebuild"
         echo "  Specific Runtime Options:"
         echo "    --seq                Run tests one-by-one, avoiding failures caused by busy CPU"
     ) 1>&2
diff --git a/test/run-test b/test/run-test
index 5a43fb0..f2bbaa7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -85,7 +85,7 @@
 
 # If JACK_CLASSPATH is not set, assume it only contains core-libart.
 if [ -z "$JACK_CLASSPATH" ]; then
-  export JACK_CLASSPATH="$ANDROID_BUILD_TOP/out/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
+  export JACK_CLASSPATH="${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
 fi
 
 # If JILL_JAR is not set, assume it is located in the prebuilts directory.
@@ -440,7 +440,7 @@
     if [ "$target_mode" = "no" ]; then
         # ANDROID_HOST_OUT is not set in a build environment.
         if [ -z "$ANDROID_HOST_OUT" ]; then
-            export ANDROID_HOST_OUT=$ANDROID_BUILD_TOP/out/host/linux-x86
+            export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/linux-x86
         fi
         guess_host_arch_name
         run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${pic_image_suffix}.art"
@@ -528,6 +528,7 @@
         echo "    --debug               Wait for a debugger to attach."
         echo "    --debuggable          Whether to compile Java code for a debugger."
         echo "    --gdb                 Run under gdb; incompatible with some tests."
+        echo "    --gdb-arg             Pass an option to gdb."
         echo "    --build-only          Build test files only (off by default)."
         echo "    --build-with-javac-dx Build test files with javac and dx (on by default)."
         echo "    --build-with-jack     Build test files with jack and jill (off by default)."
@@ -553,6 +554,8 @@
         echo "                          the image and oat files be relocated to a random"
         echo "                          address before running. (default)"
         echo "    --no-relocate         Force the use of no relocating in the test"
+        echo "    --image               Run the test using a precompiled boot image. (default)"
+        echo "    --no-image            Run the test without a precompiled boot image."
         echo "    --host                Use the host-mode virtual machine."
         echo "    --invoke-with         Pass --invoke-with option to runtime."
         echo "    --dalvik              Use Dalvik (off by default)."
@@ -564,6 +567,7 @@
              "files."
         echo "    --64                  Run the test in 64-bit mode"
         echo "    --trace               Run with method tracing"
+        echo "    --strace              Run with syscall tracing from strace."
         echo "    --stream              Run method tracing in streaming mode (requires --trace)"
         echo "    --gcstress            Run with gc stress testing"
         echo "    --gcverify            Run with gc verification"
@@ -573,6 +577,9 @@
         echo "    --dex2oat-swap        Use a dex2oat swap file."
         echo "    --instruction-set-features [string]"
         echo "                          Set instruction-set-features for compilation."
+        echo "    --pic-image           Use an image compiled with position independent code for the"
+        echo "                          boot class path."
+        echo "    --pic-test            Compile the test code position independent."
     ) 1>&2
     exit 1
 fi
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 7faf86e..d5b8989 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -30,3 +30,10 @@
 
 echo -e "${green}List properties${nc}"
 adb shell getprop
+
+echo -e "${green}Uptime${nc}"
+adb shell uptime
+
+echo -e "${green}Kill stalled dalvikvm processes${nc}"
+processes=$(adb shell "ps" | grep dalvikvm | awk '{print $2}')
+for i in $processes; do adb shell kill -9 $i; done