Merge "Move arena_bit_vector.h/cc to compiler/utils."
diff --git a/Android.mk b/Android.mk
index 06bbc67..3502740 100644
--- a/Android.mk
+++ b/Android.mk
@@ -111,9 +111,9 @@
 
 # The ART_*_TEST_DEPENDENCIES definitions:
 # - depend on Android.oattest.mk above for ART_TEST_*_DEX_FILES
-# - depend on Android.gtest.mk above for ART_*_TEST_EXECUTABLES
-ART_HOST_TEST_DEPENDENCIES   := $(ART_HOST_DEPENDENCIES)   $(ART_HOST_TEST_EXECUTABLES)   $(ART_TEST_HOST_DEX_FILES)   $(HOST_CORE_IMG_OUT)
-ART_TARGET_TEST_DEPENDENCIES := $(ART_TARGET_DEPENDENCIES) $(ART_TARGET_TEST_EXECUTABLES) $(ART_TEST_TARGET_DEX_FILES) $(TARGET_CORE_IMG_OUT)
+# - depend on Android.gtest.mk above for ART_*_GTEST_EXECUTABLES
+ART_HOST_TEST_DEPENDENCIES   := $(ART_HOST_DEPENDENCIES)   $(ART_HOST_GTEST_EXECUTABLES)   $(ART_TEST_HOST_DEX_FILES)   $(HOST_CORE_IMG_OUT)
+ART_TARGET_TEST_DEPENDENCIES := $(ART_TARGET_DEPENDENCIES) $(ART_TARGET_GTEST_EXECUTABLES) $(ART_TEST_TARGET_DEX_FILES) $(TARGET_CORE_IMG_OUT)
 
 include $(art_build_path)/Android.libarttest.mk
 
@@ -150,12 +150,12 @@
 test-art-host-dependencies: $(ART_HOST_TEST_DEPENDENCIES) $(HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) $(HOST_CORE_DEX_LOCATIONS)
 
 .PHONY: test-art-host-gtest
-test-art-host-gtest: $(ART_HOST_TEST_TARGETS)
+test-art-host-gtest: $(ART_HOST_GTEST_TARGETS)
 	@echo test-art-host-gtest PASSED
 
 # "mm valgrind-test-art-host-gtest" to build and run the host gtests under valgrind.
 .PHONY: valgrind-test-art-host-gtest
-valgrind-test-art-host-gtest: $(ART_HOST_VALGRIND_TEST_TARGETS)
+valgrind-test-art-host-gtest: $(ART_HOST_VALGRIND_GTEST_TARGETS)
 	@echo valgrind-test-art-host-gtest PASSED
 
 .PHONY: test-art-host-oat-default
@@ -222,7 +222,7 @@
 	adb shell mkdir -p $(ART_TEST_DIR)
 
 .PHONY: test-art-target-gtest
-test-art-target-gtest: $(ART_TARGET_TEST_TARGETS)
+test-art-target-gtest: $(ART_TARGET_GTEST_TARGETS)
 
 .PHONY: test-art-target-oat
 test-art-target-oat: $(ART_TEST_TARGET_OAT_TARGETS)
@@ -299,10 +299,10 @@
 build-art: build-art-host build-art-target
 
 .PHONY: build-art-host
-build-art-host:   $(ART_HOST_EXECUTABLES)   $(ART_HOST_TEST_EXECUTABLES)   $(HOST_CORE_IMG_OUT)   $(HOST_OUT)/lib/libjavacore.so
+build-art-host:   $(ART_HOST_EXECUTABLES)   $(ART_HOST_GTEST_EXECUTABLES)   $(HOST_CORE_IMG_OUT)   $(HOST_OUT)/lib/libjavacore.so
 
 .PHONY: build-art-target
-build-art-target: $(ART_TARGET_EXECUTABLES) $(ART_TARGET_TEST_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(TARGET_OUT)/lib/libjavacore.so
+build-art-target: $(ART_TARGET_EXECUTABLES) $(ART_TARGET_GTEST_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(TARGET_OUT)/lib/libjavacore.so
 
 ########################################################################
 # oatdump targets
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 67f09f9..2ddd09e 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -16,21 +16,7 @@
 
 LOCAL_PATH := art
 
-TEST_COMMON_SRC_FILES := \
-	compiler/dex/local_value_numbering_test.cc \
-	compiler/driver/compiler_driver_test.cc \
-	compiler/elf_writer_test.cc \
-	compiler/image_test.cc \
-	compiler/jni/jni_compiler_test.cc \
-	compiler/leb128_encoder_test.cc \
-	compiler/oat_test.cc \
-	compiler/optimizing/dominator_test.cc \
-	compiler/optimizing/pretty_printer_test.cc \
-	compiler/output_stream_test.cc \
-	compiler/utils/arena_allocator_test.cc \
-	compiler/utils/dedupe_set_test.cc \
-	compiler/utils/arm/managed_register_arm_test.cc \
-	compiler/utils/x86/managed_register_x86_test.cc \
+RUNTIME_GTEST_COMMON_SRC_FILES := \
 	runtime/barrier_test.cc \
 	runtime/base/bit_vector_test.cc \
 	runtime/base/hex_dump_test.cc \
@@ -57,7 +43,7 @@
 	runtime/indenter_test.cc \
 	runtime/indirect_reference_table_test.cc \
 	runtime/intern_table_test.cc \
-	runtime/jni_internal_test.cc \
+	runtime/leb128_test.cc \
 	runtime/mem_map_test.cc \
 	runtime/mirror/dex_cache_test.cc \
 	runtime/mirror/object_test.cc \
@@ -68,28 +54,50 @@
 	runtime/utils_test.cc \
 	runtime/verifier/method_verifier_test.cc \
 	runtime/verifier/reg_type_test.cc \
-	runtime/zip_archive_test.cc
+	runtime/zip_archive_test.cc \
+
+COMPILER_GTEST_COMMON_SRC_FILES := \
+	runtime/jni_internal_test.cc \
+	compiler/dex/local_value_numbering_test.cc \
+	compiler/driver/compiler_driver_test.cc \
+	compiler/elf_writer_test.cc \
+	compiler/image_test.cc \
+	compiler/jni/jni_compiler_test.cc \
+	compiler/oat_test.cc \
+	compiler/optimizing/dominator_test.cc \
+	compiler/optimizing/pretty_printer_test.cc \
+	compiler/output_stream_test.cc \
+	compiler/utils/arena_allocator_test.cc \
+	compiler/utils/dedupe_set_test.cc \
+	compiler/utils/arm/managed_register_arm_test.cc \
+	compiler/utils/x86/managed_register_x86_test.cc \
 
 ifeq ($(ART_SEA_IR_MODE),true)
-TEST_COMMON_SRC_FILES += \
+COMPILER_GTEST_COMMON_SRC_FILES += \
 	compiler/utils/scoped_hashtable_test.cc \
 	compiler/sea_ir/types/type_data_test.cc \
 	compiler/sea_ir/types/type_inference_visitor_test.cc \
 	compiler/sea_ir/ir/regions_test.cc
 endif
 
-TEST_TARGET_SRC_FILES := \
-	$(TEST_COMMON_SRC_FILES)
+RUNTIME_GTEST_TARGET_SRC_FILES := \
+	$(RUNTIME_GTEST_COMMON_SRC_FILES)
 
-TEST_HOST_SRC_FILES := \
-	$(TEST_COMMON_SRC_FILES) \
+RUNTIME_GTEST_HOST_SRC_FILES := \
+	$(RUNTIME_GTEST_COMMON_SRC_FILES)
+
+COMPILER_GTEST_TARGET_SRC_FILES := \
+	$(COMPILER_GTEST_COMMON_SRC_FILES)
+
+COMPILER_GTEST_HOST_SRC_FILES := \
+	$(COMPILER_GTEST_COMMON_SRC_FILES) \
 	compiler/utils/x86/assembler_x86_test.cc
 
-ART_HOST_TEST_EXECUTABLES :=
-ART_TARGET_TEST_EXECUTABLES :=
-ART_HOST_TEST_TARGETS :=
-ART_HOST_VALGRIND_TEST_TARGETS :=
-ART_TARGET_TEST_TARGETS :=
+ART_HOST_GTEST_EXECUTABLES :=
+ART_TARGET_GTEST_EXECUTABLES :=
+ART_HOST_GTEST_TARGETS :=
+ART_HOST_VALGRIND_GTEST_TARGETS :=
+ART_TARGET_GTEST_TARGETS :=
 
 ART_TEST_CFLAGS :=
 ifeq ($(ART_USE_PORTABLE_COMPILER),true)
@@ -98,6 +106,8 @@
 
 # $(1): target or host
 # $(2): file name
+# $(3): extra C includes
+# $(4): extra shared libraries
 define build-art-test
   ifneq ($(1),target)
     ifneq ($(1),host)
@@ -107,6 +117,8 @@
 
   art_target_or_host := $(1)
   art_gtest_filename := $(2)
+  art_gtest_extra_c_includes := $(3)
+  art_gtest_extra_shared_libraries := $(4)
 
   art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename)))
 
@@ -120,10 +132,10 @@
   ifeq ($$(art_target_or_host),target)
     LOCAL_MODULE_TAGS := tests
   endif
-  LOCAL_SRC_FILES := $$(art_gtest_filename) runtime/common_test.cc
-  LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime art/compiler
-  LOCAL_SHARED_LIBRARIES += libartd-compiler libartd
-  # dex2oatd is needed to go libartd-compiler and libartd
+  LOCAL_SRC_FILES := $$(art_gtest_filename) runtime/common_runtime_test.cc
+  LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime $(3)
+  LOCAL_SHARED_LIBRARIES += libartd $(4)
+  # dex2oatd is needed to go with libartd
   LOCAL_REQUIRED_MODULES := dex2oatd
 
   LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
@@ -146,7 +158,7 @@
     include $(LLVM_DEVICE_BUILD_MK)
     include $(BUILD_EXECUTABLE)
     art_gtest_exe := $$(LOCAL_MODULE_PATH)/$$(LOCAL_MODULE)
-    ART_TARGET_TEST_EXECUTABLES += $$(art_gtest_exe)
+    ART_TARGET_GTEST_EXECUTABLES += $$(art_gtest_exe)
   else # host
     LOCAL_CLANG := $(ART_HOST_CLANG)
     LOCAL_CFLAGS += $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
@@ -159,7 +171,7 @@
     include $(LLVM_HOST_BUILD_MK)
     include $(BUILD_HOST_EXECUTABLE)
     art_gtest_exe := $(HOST_OUT_EXECUTABLES)/$$(LOCAL_MODULE)
-    ART_HOST_TEST_EXECUTABLES += $$(art_gtest_exe)
+    ART_HOST_GTEST_EXECUTABLES += $$(art_gtest_exe)
   endif
 art_gtest_target := test-art-$$(art_target_or_host)-gtest-$$(art_gtest_name)
 ifeq ($$(art_target_or_host),target)
@@ -172,29 +184,31 @@
 	$(hide) (adb pull $(ART_TEST_DIR)/$$@ /tmp/ && echo $$@ PASSED) || (echo $$@ FAILED && exit 1)
 	$(hide) rm /tmp/$$@
 
-ART_TARGET_TEST_TARGETS += $$(art_gtest_target)
+ART_TARGET_GTEST_TARGETS += $$(art_gtest_target)
 else
 .PHONY: $$(art_gtest_target)
 $$(art_gtest_target): $$(art_gtest_exe) test-art-host-dependencies
 	$$<
 	@echo $$@ PASSED
 
-ART_HOST_TEST_TARGETS += $$(art_gtest_target)
+ART_HOST_GTEST_TARGETS += $$(art_gtest_target)
 
 .PHONY: valgrind-$$(art_gtest_target)
 valgrind-$$(art_gtest_target): $$(art_gtest_exe) test-art-host-dependencies
 	valgrind --leak-check=full --error-exitcode=1 $$<
 	@echo $$@ PASSED
 
-ART_HOST_VALGRIND_TEST_TARGETS += valgrind-$$(art_gtest_target)
+ART_HOST_VALGRIND_GTEST_TARGETS += valgrind-$$(art_gtest_target)
 endif
 endef
 
 ifeq ($(ART_BUILD_TARGET),true)
-  $(foreach file,$(TEST_TARGET_SRC_FILES), $(eval $(call build-art-test,target,$(file))))
+  $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call build-art-test,target,$(file),,)))
+  $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call build-art-test,target,$(file),art/compiler,libartd-compiler)))
 endif
 ifeq ($(WITH_HOST_DALVIK),true)
   ifeq ($(ART_BUILD_HOST),true)
-    $(foreach file,$(TEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file))))
+    $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file),,)))
+    $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file),art/compiler,libartd-compiler)))
   endif
 endif
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
new file mode 100644
index 0000000..0999d09
--- /dev/null
+++ b/compiler/common_compiler_test.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_
+#define ART_COMPILER_COMMON_COMPILER_TEST_H_
+
+#include "compiler_backend.h"
+#include "compiler_callbacks.h"
+#include "common_runtime_test.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "dex/verification_results.h"
+#include "driver/compiler_callbacks_impl.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+
+namespace art {
+
+#if defined(__arm__)
+
+#include <sys/ucontext.h>
+
+// A signal handler called when we have an illegal instruction.  We record the fact in
+// a global boolean and then increment the PC in the signal context to return to
+// the next instruction.  We know the instruction is an sdiv (4 bytes long).
+static void baddivideinst(int signo, siginfo *si, void *data) {
+  (void)signo;
+  (void)si;
+  struct ucontext *uc = (struct ucontext *)data;
+  struct sigcontext *sc = &uc->uc_mcontext;
+  sc->arm_r0 = 0;     // set R0 to #0 to signal error
+  sc->arm_pc += 4;    // skip offending instruction
+}
+
+// This is in arch/arm/arm_sdiv.S.  It does the following:
+// mov r1,#1
+// sdiv r0,r1,r1
+// bx lr
+//
+// the result will be the value 1 if sdiv is supported.  If it is not supported
+// a SIGILL signal will be raised and the signal handler (baddivideinst) called.
+// The signal handler sets r0 to #0 and then increments pc beyond the failed instruction.
+// Thus if the instruction is not supported, the result of this function will be #0.
+
+extern "C" bool CheckForARMSDIVInstruction();
+
+static InstructionSetFeatures GuessInstructionFeatures() {
+  InstructionSetFeatures f;
+
+  // Uncomment this for processing of /proc/cpuinfo.
+  if (false) {
+    // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
+    // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
+    std::ifstream in("/proc/cpuinfo");
+    if (in) {
+      while (!in.eof()) {
+        std::string line;
+        std::getline(in, line);
+        if (!in.eof()) {
+          if (line.find("Features") != std::string::npos) {
+            if (line.find("idivt") != std::string::npos) {
+              f.SetHasDivideInstruction(true);
+            }
+          }
+        }
+        in.close();
+      }
+    } else {
+      LOG(INFO) << "Failed to open /proc/cpuinfo";
+    }
+  }
+
+  // See if we have an sdiv instruction.  Register a signal handler and try to execute
+  // an sdiv instruction.  If we get a SIGILL then it's not supported.  We can't use
+  // the /proc/cpuinfo method for this because Krait devices don't always put the idivt
+  // feature in the list.
+  struct sigaction sa, osa;
+  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
+  sa.sa_sigaction = baddivideinst;
+  sigaction(SIGILL, &sa, &osa);
+
+  if (CheckForARMSDIVInstruction()) {
+    f.SetHasDivideInstruction(true);
+  }
+
+  // Restore the signal handler.
+  sigaction(SIGILL, &osa, nullptr);
+
+  // Other feature guesses in here.
+  return f;
+}
+
+#endif
+
+// Given a set of instruction features from the build, parse it.  The
+// input 'str' is a comma separated list of feature names.  Parse it and
+// return the InstructionSetFeatures object.
+static InstructionSetFeatures ParseFeatureList(std::string str) {
+  InstructionSetFeatures result;
+  typedef std::vector<std::string> FeatureList;
+  FeatureList features;
+  Split(str, ',', features);
+  for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
+    std::string feature = Trim(*i);
+    if (feature == "default") {
+      // Nothing to do.
+    } else if (feature == "div") {
+      // Supports divide instruction.
+      result.SetHasDivideInstruction(true);
+    } else if (feature == "nodiv") {
+      // Turn off support for divide instruction.
+      result.SetHasDivideInstruction(false);
+    } else {
+      LOG(FATAL) << "Unknown instruction set feature: '" << feature << "'";
+    }
+  }
+  // Others...
+  return result;
+}
+
+class CommonCompilerTest : public CommonRuntimeTest {
+ public:
+  static void MakeExecutable(const std::vector<uint8_t>& code) {
+    CHECK_NE(code.size(), 0U);
+    MakeExecutable(&code[0], code.size());
+  }
+
+  // Create an OatMethod based on pointers (for unit tests).
+  OatFile::OatMethod CreateOatMethod(const void* code,
+                                     const size_t frame_size_in_bytes,
+                                     const uint32_t core_spill_mask,
+                                     const uint32_t fp_spill_mask,
+                                     const uint8_t* mapping_table,
+                                     const uint8_t* vmap_table,
+                                     const uint8_t* gc_map) {
+    const byte* base;
+    uint32_t code_offset, mapping_table_offset, vmap_table_offset, gc_map_offset;
+    if (mapping_table == nullptr && vmap_table == nullptr && gc_map == nullptr) {
+      base = reinterpret_cast<const byte*>(code);  // Base of data points at code.
+      base -= kPointerSize;  // Move backward so that code_offset != 0.
+      code_offset = kPointerSize;
+      mapping_table_offset = 0;
+      vmap_table_offset = 0;
+      gc_map_offset = 0;
+    } else {
+      // TODO: 64bit support.
+      base = nullptr;  // Base of data in oat file, ie 0.
+      code_offset = PointerToLowMemUInt32(code);
+      mapping_table_offset = PointerToLowMemUInt32(mapping_table);
+      vmap_table_offset = PointerToLowMemUInt32(vmap_table);
+      gc_map_offset = PointerToLowMemUInt32(gc_map);
+    }
+    return OatFile::OatMethod(base,
+                              code_offset,
+                              frame_size_in_bytes,
+                              core_spill_mask,
+                              fp_spill_mask,
+                              mapping_table_offset,
+                              vmap_table_offset,
+                              gc_map_offset);
+  }
+
+  void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(method != nullptr);
+
+    const CompiledMethod* compiled_method = nullptr;
+    if (!method->IsAbstract()) {
+      mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
+      const DexFile& dex_file = *dex_cache->GetDexFile();
+      compiled_method =
+          compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
+                                                              method->GetDexMethodIndex()));
+    }
+    if (compiled_method != nullptr) {
+      const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
+      if (code == nullptr) {
+        code = compiled_method->GetPortableCode();
+      }
+      MakeExecutable(*code);
+      const void* method_code = CompiledMethod::CodePointer(&(*code)[0],
+                                                            compiled_method->GetInstructionSet());
+      LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
+      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
+                                                      compiled_method->GetFrameSizeInBytes(),
+                                                      compiled_method->GetCoreSpillMask(),
+                                                      compiled_method->GetFpSpillMask(),
+                                                      &compiled_method->GetMappingTable()[0],
+                                                      &compiled_method->GetVmapTable()[0],
+                                                      nullptr);
+      oat_method.LinkMethod(method);
+      method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+    } else {
+      // No code? You must mean to go into the interpreter.
+      const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
+                                                     : GetQuickToInterpreterBridge();
+      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
+                                                      kStackAlignment,
+                                                      0,
+                                                      0,
+                                                      nullptr,
+                                                      nullptr,
+                                                      nullptr);
+      oat_method.LinkMethod(method);
+      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
+    }
+    // Create bridges to transition between different kinds of compiled code.
+    if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
+      method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
+    } else {
+      CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
+      method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
+      method->SetIsPortableCompiled();
+    }
+  }
+
+  static void MakeExecutable(const void* code_start, size_t code_length) {
+    CHECK(code_start != nullptr);
+    CHECK_NE(code_length, 0U);
+    uintptr_t data = reinterpret_cast<uintptr_t>(code_start);
+    uintptr_t base = RoundDown(data, kPageSize);
+    uintptr_t limit = RoundUp(data + code_length, kPageSize);
+    uintptr_t len = limit - base;
+    int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
+    CHECK_EQ(result, 0);
+
+    // Flush instruction cache
+    // Only uses __builtin___clear_cache if GCC >= 4.3.3
+#if GCC_VERSION >= 40303
+    __builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
+#else
+    LOG(FATAL) << "UNIMPLEMENTED: cache flush";
+#endif
+  }
+
+  void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    std::string class_descriptor(DotToDescriptor(class_name));
+    Thread* self = Thread::Current();
+    SirtRef<mirror::ClassLoader> loader(self, class_loader);
+    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
+    CHECK(klass != nullptr) << "Class not found " << class_name;
+    for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
+      MakeExecutable(klass->GetDirectMethod(i));
+    }
+    for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
+      MakeExecutable(klass->GetVirtualMethod(i));
+    }
+  }
+
+ protected:
+  virtual void SetUp() {
+    CommonRuntimeTest::SetUp();
+    {
+      ScopedObjectAccess soa(Thread::Current());
+
+      InstructionSet instruction_set = kNone;
+
+      // Take the default set of instruction features from the build.
+      InstructionSetFeatures instruction_set_features =
+          ParseFeatureList(STRINGIFY(ART_DEFAULT_INSTRUCTION_SET_FEATURES));
+
+#if defined(__arm__)
+      instruction_set = kThumb2;
+      InstructionSetFeatures runtime_features = GuessInstructionFeatures();
+
+      // for ARM, do a runtime check to make sure that the features we are passed from
+      // the build match the features we actually determine at runtime.
+      ASSERT_EQ(instruction_set_features, runtime_features);
+#elif defined(__mips__)
+      instruction_set = kMips;
+#elif defined(__i386__)
+      instruction_set = kX86;
+#elif defined(__x86_64__)
+      instruction_set = kX86_64;
+      // TODO: x86_64 compilation support.
+      compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
+#endif
+
+      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
+        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+        if (!runtime_->HasCalleeSaveMethod(type)) {
+          runtime_->SetCalleeSaveMethod(
+              runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
+        }
+      }
+
+      // TODO: make selectable
+      CompilerBackend::Kind compiler_backend
+          = (kUsePortableCompiler) ? CompilerBackend::kPortable : CompilerBackend::kQuick;
+      timer_.reset(new CumulativeLogger("Compilation times"));
+      compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
+                                                verification_results_.get(),
+                                                method_inliner_map_.get(),
+                                                compiler_backend, instruction_set,
+                                                instruction_set_features,
+                                                true, new CompilerDriver::DescriptorSet,
+                                                2, true, true, timer_.get()));
+    }
+    // We typically don't generate an image in unit tests, so disable this optimization by default.
+    compiler_driver_->SetSupportBootImageFixup(false);
+  }
+
+  virtual void SetUpRuntimeOptions(Runtime::Options *options) {
+    CommonRuntimeTest::SetUpRuntimeOptions(options);
+
+    compiler_options_.reset(new CompilerOptions);
+    verification_results_.reset(new VerificationResults(compiler_options_.get()));
+    method_inliner_map_.reset(new DexFileToMethodInlinerMap);
+    callbacks_.reset(new CompilerCallbacksImpl(verification_results_.get(),
+                                               method_inliner_map_.get()));
+    options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+  }
+
+  virtual void TearDown() {
+    timer_.reset();
+    compiler_driver_.reset();
+    callbacks_.reset();
+    method_inliner_map_.reset();
+    verification_results_.reset();
+    compiler_options_.reset();
+
+    CommonRuntimeTest::TearDown();
+  }
+
+  void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    std::string class_descriptor(DotToDescriptor(class_name));
+    Thread* self = Thread::Current();
+    SirtRef<mirror::ClassLoader> loader(self, class_loader);
+    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
+    CHECK(klass != nullptr) << "Class not found " << class_name;
+    for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
+      CompileMethod(klass->GetDirectMethod(i));
+    }
+    for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
+      CompileMethod(klass->GetVirtualMethod(i));
+    }
+  }
+
+  void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(method != nullptr);
+    TimingLogger timings("CommonTest::CompileMethod", false, false);
+    timings.StartSplit("CompileOne");
+    compiler_driver_->CompileOne(method, timings);
+    MakeExecutable(method);
+    timings.EndSplit();
+  }
+
+  void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+                           const char* method_name, const char* signature)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    std::string class_descriptor(DotToDescriptor(class_name));
+    Thread* self = Thread::Current();
+    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
+    CHECK(klass != nullptr) << "Class not found " << class_name;
+    mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature);
+    CHECK(method != nullptr) << "Direct method not found: "
+                             << class_name << "." << method_name << signature;
+    CompileMethod(method);
+  }
+
+  void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+                            const char* method_name, const char* signature)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    std::string class_descriptor(DotToDescriptor(class_name));
+    Thread* self = Thread::Current();
+    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
+    CHECK(klass != nullptr) << "Class not found " << class_name;
+    mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
+    CHECK(method != NULL) << "Virtual method not found: "
+                          << class_name << "." << method_name << signature;
+    CompileMethod(method);
+  }
+
+  void ReserveImageSpace() {
+    // Reserve where the image will be loaded up front so that other parts of test set up don't
+    // accidentally end up colliding with the fixed memory address when we need to load the image.
+    std::string error_msg;
+    image_reservation_.reset(MemMap::MapAnonymous("image reservation",
+                                                  reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+                                                  (size_t)100 * 1024 * 1024,  // 100MB
+                                                  PROT_NONE,
+                                                  false /* no need for 4gb flag with fixed mmap */,
+                                                  &error_msg));
+    CHECK(image_reservation_.get() != nullptr) << error_msg;
+  }
+
+  void UnreserveImageSpace() {
+    image_reservation_.reset();
+  }
+
+  UniquePtr<CompilerOptions> compiler_options_;
+  UniquePtr<VerificationResults> verification_results_;
+  UniquePtr<DexFileToMethodInlinerMap> method_inliner_map_;
+  UniquePtr<CompilerCallbacksImpl> callbacks_;
+  UniquePtr<CompilerDriver> compiler_driver_;
+  UniquePtr<CumulativeLogger> timer_;
+
+ private:
+  UniquePtr<MemMap> image_reservation_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_COMMON_COMPILER_TEST_H_
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6e97c53..b74052c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -23,7 +23,7 @@
 #include "dex/compiler_ir.h"
 #include "dex/backend.h"
 #include "driver/compiler_driver.h"
-#include "leb128_encoder.h"
+#include "leb128.h"
 #include "safe_map.h"
 #include "utils/arena_allocator.h"
 #include "utils/growable_array.h"
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index ec0a8bd..34806ce 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -21,7 +21,7 @@
 
 #include "UniquePtr.h"
 #include "class_linker.h"
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "dex_file.h"
 #include "gc/heap.h"
 #include "mirror/art_method-inl.h"
@@ -33,7 +33,7 @@
 
 namespace art {
 
-class CompilerDriverTest : public CommonTest {
+class CompilerDriverTest : public CommonCompilerTest {
  protected:
   void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
     TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 5bad0d0..8175c35 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -14,18 +14,18 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
-
-#include "oat.h"
 #include "elf_file.h"
 
+#include "common_compiler_test.h"
+#include "oat.h"
+
 namespace art {
 
-class ElfWriterTest : public CommonTest {
+class ElfWriterTest : public CommonCompilerTest {
  protected:
   virtual void SetUp() {
     ReserveImageSpace();
-    CommonTest::SetUp();
+    CommonCompilerTest::SetUp();
   }
 };
 
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 49cabdc..16e2aa2 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -14,15 +14,16 @@
  * limitations under the License.
  */
 
+#include "image.h"
+
 #include <string>
 #include <vector>
 
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "compiler/elf_fixup.h"
 #include "compiler/image_writer.h"
 #include "compiler/oat_writer.h"
 #include "gc/space/image_space.h"
-#include "image.h"
 #include "lock_word.h"
 #include "mirror/object-inl.h"
 #include "signal_catcher.h"
@@ -32,11 +33,11 @@
 
 namespace art {
 
-class ImageTest : public CommonTest {
+class ImageTest : public CommonCompilerTest {
  protected:
   virtual void SetUp() {
     ReserveImageSpace();
-    CommonTest::SetUp();
+    CommonCompilerTest::SetUp();
   }
 };
 
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c8447be..aa16885 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -583,6 +583,12 @@
   DCHECK(orig != NULL);
   DCHECK(copy != NULL);
   copy->SetClass<kVerifyNone>(down_cast<Class*>(GetImageAddress(orig->GetClass())));
+  if (kUseBrooksPointer) {
+    orig->AssertSelfBrooksPointer();
+    // Note the address 'copy' isn't the same as the image address of 'orig'.
+    copy->SetBrooksPointer(GetImageAddress(orig));
+    DCHECK(copy->GetBrooksPointer() == GetImageAddress(orig));
+  }
   // TODO: special case init of pointers to malloc data (or removal of these pointers)
   if (orig->IsClass<kVerifyNone>()) {
     FixupClass(orig->AsClass<kVerifyNone>(), down_cast<Class*>(copy));
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 1bdff37..f48cf6c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -15,7 +15,7 @@
  */
 
 #include "class_linker.h"
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "dex_file.h"
 #include "gtest/gtest.h"
 #include "indirect_reference_table.h"
@@ -43,7 +43,7 @@
 
 namespace art {
 
-class JniCompilerTest : public CommonTest {
+class JniCompilerTest : public CommonCompilerTest {
  protected:
   void CompileForTest(jobject class_loader, bool direct,
                       const char* method_name, const char* method_sig) {
diff --git a/compiler/leb128_encoder.h b/compiler/leb128_encoder.h
deleted file mode 100644
index 6766683..0000000
--- a/compiler/leb128_encoder.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LEB128_ENCODER_H_
-#define ART_COMPILER_LEB128_ENCODER_H_
-
-#include "base/macros.h"
-#include "leb128.h"
-
-namespace art {
-
-static inline uint8_t* EncodeUnsignedLeb128(uint8_t* dest, uint32_t value) {
-  uint8_t out = value & 0x7f;
-  value >>= 7;
-  while (value != 0) {
-    *dest++ = out | 0x80;
-    out = value & 0x7f;
-    value >>= 7;
-  }
-  *dest++ = out;
-  return dest;
-}
-
-static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) {
-  uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
-  uint8_t out = value & 0x7f;
-  while (extra_bits != 0u) {
-    *dest++ = out | 0x80;
-    value >>= 7;
-    out = value & 0x7f;
-    extra_bits >>= 7;
-  }
-  *dest++ = out;
-  return dest;
-}
-
-// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
-class Leb128EncodingVector {
- public:
-  Leb128EncodingVector() {
-  }
-
-  void Reserve(uint32_t size) {
-    data_.reserve(size);
-  }
-
-  void PushBackUnsigned(uint32_t value) {
-    uint8_t out = value & 0x7f;
-    value >>= 7;
-    while (value != 0) {
-      data_.push_back(out | 0x80);
-      out = value & 0x7f;
-      value >>= 7;
-    }
-    data_.push_back(out);
-  }
-
-  template<typename It>
-  void InsertBackUnsigned(It cur, It end) {
-    for (; cur != end; ++cur) {
-      PushBackUnsigned(*cur);
-    }
-  }
-
-  void PushBackSigned(int32_t value) {
-    uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
-    uint8_t out = value & 0x7f;
-    while (extra_bits != 0u) {
-      data_.push_back(out | 0x80);
-      value >>= 7;
-      out = value & 0x7f;
-      extra_bits >>= 7;
-    }
-    data_.push_back(out);
-  }
-
-  template<typename It>
-  void InsertBackSigned(It cur, It end) {
-    for (; cur != end; ++cur) {
-      PushBackSigned(*cur);
-    }
-  }
-
-  const std::vector<uint8_t>& GetData() const {
-    return data_;
-  }
-
- private:
-  std::vector<uint8_t> data_;
-
-  DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_LEB128_ENCODER_H_
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index e91ffcb..55a962f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -14,20 +14,19 @@
  * limitations under the License.
  */
 
-#include "compiler/oat_writer.h"
+#include "common_compiler_test.h"
 #include "compiler/compiler_backend.h"
+#include "compiler/oat_writer.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
 #include "oat_file.h"
 #include "vector_output_stream.h"
 
-#include "common_test.h"
-
 namespace art {
 
-class OatTest : public CommonTest {
+class OatTest : public CommonCompilerTest {
  protected:
   static const bool kCompile = false;  // DISABLED_ due to the time to compile libcore
 
@@ -81,7 +80,7 @@
 };
 
 TEST_F(OatTest, WriteRead) {
-  TimingLogger timings("CommonTest::WriteRead", false, false);
+  TimingLogger timings("OatTest::WriteRead", false, false);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
   // TODO: make selectable.
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index a957ee3..290bf25 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -14,15 +14,16 @@
  * limitations under the License.
  */
 
-#include "base/logging.h"
-#include "buffered_output_stream.h"
-#include "common_test.h"
 #include "file_output_stream.h"
 #include "vector_output_stream.h"
 
+#include "base/logging.h"
+#include "buffered_output_stream.h"
+#include "common_runtime_test.h"
+
 namespace art {
 
-class OutputStreamTest : public CommonTest {
+class OutputStreamTest : public CommonRuntimeTest {
  protected:
   void CheckOffset(off_t expected) {
     off_t actual = output_stream_->Seek(0, kSeekCurrent);
diff --git a/compiler/sea_ir/ir/regions_test.cc b/compiler/sea_ir/ir/regions_test.cc
index 8ca51e4..95bd310 100644
--- a/compiler/sea_ir/ir/regions_test.cc
+++ b/compiler/sea_ir/ir/regions_test.cc
@@ -14,15 +14,14 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "sea_ir/ir/sea.h"
 
 using utils::ScopedHashtable;
 
 namespace sea_ir {
 
-class RegionsTest : public art::CommonTest {
-};
+class RegionsTest : public art::CommonCompilerTest {};
 
 TEST_F(RegionsTest, Basics) {
   sea_ir::SeaGraph sg(*java_lang_dex_file_);
diff --git a/compiler/sea_ir/types/type_data_test.cc b/compiler/sea_ir/types/type_data_test.cc
index f7a5362..42c6973 100644
--- a/compiler/sea_ir/types/type_data_test.cc
+++ b/compiler/sea_ir/types/type_data_test.cc
@@ -14,13 +14,12 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "sea_ir/types/types.h"
 
 namespace sea_ir {
 
-class TypeDataTest : public art::CommonTest {
-};
+class TypeDataTest : public art::CommonCompilerTest {};
 
 TEST_F(TypeDataTest, Basics) {
   TypeData td;
diff --git a/compiler/sea_ir/types/type_inference_visitor_test.cc b/compiler/sea_ir/types/type_inference_visitor_test.cc
index 77acb3d..ccb6991 100644
--- a/compiler/sea_ir/types/type_inference_visitor_test.cc
+++ b/compiler/sea_ir/types/type_inference_visitor_test.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "sea_ir/types/type_inference_visitor.h"
 #include "sea_ir/ir/sea.h"
 
@@ -31,8 +31,7 @@
   std::vector<InstructionNode*> producers_;
 };
 
-class TypeInferenceVisitorTest : public art::CommonTest {
-};
+class TypeInferenceVisitorTest : public art::CommonCompilerTest {};
 
 TEST_F(TypeInferenceVisitorTest, MergeIntWithByte) {
   TypeData td;
diff --git a/compiler/utils/scoped_hashtable_test.cc b/compiler/utils/scoped_hashtable_test.cc
index 68608f0..1c843eb 100644
--- a/compiler/utils/scoped_hashtable_test.cc
+++ b/compiler/utils/scoped_hashtable_test.cc
@@ -14,8 +14,9 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
-#include "utils/scoped_hashtable.h"
+#include "scoped_hashtable.h"
+
+#include "common_runtime_test.h"
 
 using utils::ScopedHashtable;
 
@@ -27,8 +28,7 @@
   int value_;
 };
 
-class ScopedHashtableTest : public CommonTest {
-};
+class ScopedHashtableTest : public testing::Test {};
 
 TEST_F(ScopedHashtableTest, Basics) {
   ScopedHashtable<int, Value*> sht;
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index ea8461f..3486c1d 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -152,15 +152,7 @@
     return EXIT_FAILURE;
   }
 
-  // Make sure they provided a class name.
-  if (arg_idx == argc) {
-    fprintf(stderr, "Class name required\n");
-    return EXIT_FAILURE;
-  }
-
-  // insert additional internal options here
-
-  if (curr_opt >= option_count) {
+  if (curr_opt > option_count) {
     fprintf(stderr, "curr_opt(%d) >= option_count(%d)\n", curr_opt, option_count);
     abort();
     return EXIT_FAILURE;
@@ -187,6 +179,14 @@
     return EXIT_FAILURE;
   }
 
+  // Make sure they provided a class name. We do this after
+  // JNI_CreateJavaVM so that things like "-help" have the opportunity
+  // to emit a usage statement.
+  if (arg_idx == argc) {
+    fprintf(stderr, "Class name required\n");
+    return EXIT_FAILURE;
+  }
+
   int rc = InvokeMain(env, &argv[arg_idx]);
 
 #if defined(NDEBUG)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 1e5a681..d6d2b42 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -332,7 +332,7 @@
   ifeq ($$(art_target_or_host),target)
     LOCAL_SRC_FILES := $(LIBART_TARGET_SRC_FILES)
     $(foreach arch,$(ART_SUPPORTED_ARCH),
-      LOCAL_SRC_FILES_$(arch) := $$(LIBART_TARGET_SRC_FILES_$(arch))))
+      LOCAL_SRC_FILES_$(arch) := $$(LIBART_TARGET_SRC_FILES_$(arch)))
   else # host
     LOCAL_SRC_FILES := $(LIBART_HOST_SRC_FILES)
     LOCAL_IS_HOST_MODULE := true
@@ -352,7 +352,7 @@
   LOCAL_CFLAGS := $(LIBART_CFLAGS)
   LOCAL_LDFLAGS := $(LIBART_LDFLAGS)
   $(foreach arch,$(ART_SUPPORTED_ARCH),
-    LOCAL_LDFLAGS_$(arch) := $$(LIBART_TARGET_LDFLAGS_$(arch))))
+    LOCAL_LDFLAGS_$(arch) := $$(LIBART_TARGET_LDFLAGS_$(arch)))
 
   ifeq ($$(art_target_or_host),target)
     LOCAL_CLANG := $(ART_TARGET_CLANG)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b1f2275..8683a56 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1307,8 +1307,10 @@
      *   esi: pointer to this string data
      *   edi: pointer to comp string data
      */
+    jecxz .Lkeep_length
     repe cmpsw                    // find nonmatching chars in [%esi] and [%edi], up to length %ecx
     jne .Lnot_equal
+.Lkeep_length:
     POP edi                       // pop callee save reg
     POP esi                       // pop callee save reg
     ret
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 4c42099..0c1a72a 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_ASM_SUPPORT_H_
 #define ART_RUNTIME_ASM_SUPPORT_H_
 
+#include "brooks_pointer.h"
+
 // Value loaded into rSUSPEND for quick. When this value is counted down to zero we do a suspend
 // check.
 #define SUSPEND_CHECK_INTERVAL (1000)
@@ -25,6 +27,8 @@
 #define CLASS_OFFSET 0
 #define LOCK_WORD_OFFSET 4
 
+#ifndef USE_BROOKS_POINTER
+
 // Offsets within java.lang.Class.
 #define CLASS_COMPONENT_TYPE_OFFSET 12
 
@@ -43,4 +47,26 @@
 #define METHOD_PORTABLE_CODE_OFFSET 40
 #define METHOD_QUICK_CODE_OFFSET 48
 
+#else
+
+// Offsets within java.lang.Class.
+#define CLASS_COMPONENT_TYPE_OFFSET 20
+
+// Array offsets.
+#define ARRAY_LENGTH_OFFSET 16
+#define OBJECT_ARRAY_DATA_OFFSET 20
+
+// Offsets within java.lang.String.
+#define STRING_VALUE_OFFSET 16
+#define STRING_COUNT_OFFSET 20
+#define STRING_OFFSET_OFFSET 28
+#define STRING_DATA_OFFSET 20
+
+// Offsets within java.lang.Method.
+#define METHOD_DEX_CACHE_METHODS_OFFSET 20
+#define METHOD_PORTABLE_CODE_OFFSET 48
+#define METHOD_QUICK_CODE_OFFSET 56
+
+#endif
+
 #endif  // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 69951c5..7d32338 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -19,7 +19,7 @@
 #include <string>
 
 #include "atomic.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "mirror/object_array-inl.h"
 #include "thread_pool.h"
 #include "UniquePtr.h"
@@ -56,7 +56,7 @@
   AtomicInteger* const count3_;
 };
 
-class BarrierTest : public CommonTest {
+class BarrierTest : public CommonRuntimeTest {
  public:
   static int32_t num_threads;
 };
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index cf7029a..6cc9396 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -21,6 +21,15 @@
 
 #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 
+// C++11 final and override keywords that were introduced in GCC version 4.7.
+#if GCC_VERSION >= 40700
+#define OVERRIDE override
+#define FINAL final
+#else
+#define OVERRIDE
+#define FINAL
+#endif
+
 // The COMPILE_ASSERT macro can be used to verify that a compile time
 // expression is true. For example, you could use it to verify the
 // size of a static array:
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 1af8e0a..ee0b1be 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -16,11 +16,11 @@
 
 #include "mutex.h"
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class MutexTest : public CommonTest {};
+class MutexTest : public CommonRuntimeTest {};
 
 struct MutexTester {
   static void AssertDepth(Mutex& mu, uint32_t expected_depth) {
diff --git a/runtime/base/timing_logger_test.cc b/runtime/base/timing_logger_test.cc
index 03cc9cc..0757751 100644
--- a/runtime/base/timing_logger_test.cc
+++ b/runtime/base/timing_logger_test.cc
@@ -16,11 +16,11 @@
 
 #include "timing_logger.h"
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class TimingLoggerTest : public CommonTest {};
+class TimingLoggerTest : public CommonRuntimeTest {};
 
 // TODO: Negative test cases (improper pairing of EndSplit, etc.)
 
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
index 49750f4..7e45321 100644
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ b/runtime/base/unix_file/mapped_file_test.cc
@@ -30,7 +30,7 @@
   }
 
   void SetUp() {
-    art::CommonTest::SetEnvironmentVariables(android_data_);
+    art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
 
     good_path_ = GetTmpPath("some-file.txt");
     int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 3152788..8a6605e 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -21,8 +21,7 @@
 
 #include <string>
 
-#include "common_test.h"
-#include "gtest/gtest.h"
+#include "common_runtime_test.h"
 #include "UniquePtr.h"
 
 namespace unix_file {
@@ -37,7 +36,7 @@
   virtual RandomAccessFile* MakeTestFile() = 0;
 
   virtual void SetUp() {
-    art::CommonTest::SetEnvironmentVariables(android_data_);
+    art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
   }
 
   std::string GetTmpPath(const std::string& name) {
diff --git a/runtime/brooks_pointer.h b/runtime/brooks_pointer.h
new file mode 100644
index 0000000..3dac6e9
--- /dev/null
+++ b/runtime/brooks_pointer.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BROOKS_POINTER_H_
+#define ART_RUNTIME_BROOKS_POINTER_H_
+
+// This is in a separate file (from globals.h) because asm_support.h
+// (a C header, not C++) can't include globals.h.
+
+// Uncomment this and the two fields in Object.java (libcore) to
+// enable brooks pointers.
+// #define USE_BROOKS_POINTER
+
+#endif  // ART_RUNTIME_BROOKS_POINTER_H_
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 6ef0082..3da7409 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -40,7 +40,7 @@
 
 inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class* element_class) {
   for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
-    // Read the cached the array class once to avoid races with other threads setting it.
+    // Read the cached array class once to avoid races with other threads setting it.
     mirror::Class* array_class = find_array_class_cache_[i];
     if (array_class != nullptr && array_class->GetComponentType() == element_class) {
       return array_class;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 69d957f..87323f9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -205,6 +205,9 @@
   CHECK(java_lang_Class.get() != NULL);
   mirror::Class::SetClassClass(java_lang_Class.get());
   java_lang_Class->SetClass(java_lang_Class.get());
+  if (kUseBrooksPointer) {
+    java_lang_Class->AssertSelfBrooksPointer();
+  }
   java_lang_Class->SetClassSize(sizeof(mirror::ClassClass));
   heap->DecrementDisableMovingGC(self);
   // AllocClass(mirror::Class*) can now be used
@@ -1182,27 +1185,47 @@
   return dex_cache.get();
 }
 
+// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
+// fence.
+class InitializeClassVisitor {
+ public:
+  explicit InitializeClassVisitor(uint32_t class_size) : class_size_(class_size) {
+  }
+
+  void operator()(mirror::Object* obj, size_t usable_size) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LE(class_size_, usable_size);
+    // Avoid AsClass as object is not yet in live bitmap or allocation stack.
+    mirror::Class* klass = down_cast<mirror::Class*>(obj);
+    // DCHECK(klass->IsClass());
+    klass->SetClassSize(class_size_);
+    klass->SetPrimitiveType(Primitive::kPrimNot);  // Default to not being primitive.
+    klass->SetDexClassDefIndex(DexFile::kDexNoIndex16);  // Default to no valid class def index.
+    klass->SetDexTypeIndex(DexFile::kDexNoIndex16);  // Default to no valid type index.
+  }
+
+ private:
+  const uint32_t class_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(InitializeClassVisitor);
+};
+
 mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class,
-                                       size_t class_size) {
+                                       uint32_t class_size) {
   DCHECK_GE(class_size, sizeof(mirror::Class));
   gc::Heap* heap = Runtime::Current()->GetHeap();
+  InitializeClassVisitor visitor(class_size);
   mirror::Object* k =
-      kMovingClasses ?
-          heap->AllocObject<true>(self, java_lang_Class, class_size) :
-          heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size);
-  if (UNLIKELY(k == NULL)) {
+      kMovingClasses ? heap->AllocObject<true>(self, java_lang_Class, class_size, visitor)
+                     : heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
+  if (UNLIKELY(k == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
-    return NULL;
+    return nullptr;
   }
-  mirror::Class* klass = k->AsClass();
-  klass->SetPrimitiveType(Primitive::kPrimNot);  // Default to not being primitive.
-  klass->SetClassSize(class_size);
-  klass->SetDexClassDefIndex(DexFile::kDexNoIndex16);  // Default to no valid class def index.
-  klass->SetDexTypeIndex(DexFile::kDexNoIndex16);  // Default to no valid type index.
-  return klass;
+  return k->AsClass();
 }
 
-mirror::Class* ClassLinker::AllocClass(Thread* self, size_t class_size) {
+mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) {
   return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
 }
 
@@ -1416,7 +1439,7 @@
 }
 
 // Precomputes size that will be needed for Class, matching LinkStaticFields
-size_t ClassLinker::SizeOfClass(const DexFile& dex_file,
+uint32_t ClassLinker::SizeOfClass(const DexFile& dex_file,
                                 const DexFile::ClassDef& dex_class_def) {
   const byte* class_data = dex_file.GetClassData(dex_class_def);
   size_t num_ref = 0;
@@ -1437,7 +1460,7 @@
     }
   }
   // start with generic class data
-  size_t size = sizeof(mirror::Class);
+  uint32_t size = sizeof(mirror::Class);
   // follow with reference fields which must be contiguous at start
   size += (num_ref * sizeof(uint32_t));
   // if there are 64-bit fields to add, make sure they are aligned
@@ -1744,6 +1767,9 @@
   CHECK(descriptor != NULL);
 
   klass->SetClass(GetClassRoot(kJavaLangClass));
+  if (kUseBrooksPointer) {
+    klass->AssertSelfBrooksPointer();
+  }
   uint32_t access_flags = dex_class_def.access_flags_;
   // Make sure that none of our runtime-only flags are set.
   CHECK_EQ(access_flags & ~kAccJavaFlagsMask, 0U);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f346102..88dbb9c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -388,13 +388,13 @@
   void FinishInit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // For early bootstrapping by Init
-  mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, size_t class_size)
+  mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Alloc* convenience functions to avoid needing to pass in mirror::Class*
   // values that are known to the ClassLinker such as
   // kObjectArrayClass and kJavaLangString etc.
-  mirror::Class* AllocClass(Thread* self, size_t class_size)
+  mirror::Class* AllocClass(Thread* self, uint32_t class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -419,7 +419,7 @@
                          mirror::Class* c, SafeMap<uint32_t, mirror::ArtField*>& field_map)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t SizeOfClass(const DexFile& dex_file,
+  uint32_t SizeOfClass(const DexFile& dex_file,
                      const DexFile::ClassDef& dex_class_def);
 
   void LoadClass(const DexFile& dex_file,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d6a67cc..55c23f4 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -20,7 +20,7 @@
 
 #include "UniquePtr.h"
 #include "class_linker-inl.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "gc/heap.h"
@@ -37,7 +37,7 @@
 
 namespace art {
 
-class ClassLinkerTest : public CommonTest {
+class ClassLinkerTest : public CommonRuntimeTest {
  protected:
   void AssertNonExistentClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -339,7 +339,7 @@
     mirror::DexCache* dex_cache = class_linker_->FindDexCache(*dex);
     mirror::ObjectArray<mirror::ArtMethod>* resolved_methods = dex_cache->GetResolvedMethods();
     for (size_t i = 0; i < static_cast<size_t>(resolved_methods->GetLength()); i++) {
-      EXPECT_TRUE(resolved_methods->Get(i) != NULL);
+      EXPECT_TRUE(resolved_methods->Get(i) != NULL) << dex->GetLocation() << " i=" << i;
     }
   }
 
@@ -451,6 +451,10 @@
 
     // alphabetical 32-bit
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_"));
+#ifdef USE_BROOKS_POINTER
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_brooks_ptr_), "shadow$_x_brooks_ptr_"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_padding_), "shadow$_x_padding_"));
+#endif
   };
 };
 
@@ -705,11 +709,21 @@
   EXPECT_FALSE(JavaLangObject->IsSynthetic());
   EXPECT_EQ(2U, JavaLangObject->NumDirectMethods());
   EXPECT_EQ(11U, JavaLangObject->NumVirtualMethods());
-  EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
+  if (!kUseBrooksPointer) {
+    EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
+  } else {
+    EXPECT_EQ(4U, JavaLangObject->NumInstanceFields());
+  }
   FieldHelper fh(JavaLangObject->GetInstanceField(0));
   EXPECT_STREQ(fh.GetName(), "shadow$_klass_");
   fh.ChangeField(JavaLangObject->GetInstanceField(1));
   EXPECT_STREQ(fh.GetName(), "shadow$_monitor_");
+  if (kUseBrooksPointer) {
+    fh.ChangeField(JavaLangObject->GetInstanceField(2));
+    EXPECT_STREQ(fh.GetName(), "shadow$_x_brooks_ptr_");
+    fh.ChangeField(JavaLangObject->GetInstanceField(3));
+    EXPECT_STREQ(fh.GetName(), "shadow$_x_padding_");
+  }
 
   EXPECT_EQ(0U, JavaLangObject->NumStaticFields());
   EXPECT_EQ(0U, kh.NumDirectInterfaces());
diff --git a/runtime/common_test.cc b/runtime/common_runtime_test.cc
similarity index 92%
rename from runtime/common_test.cc
rename to runtime/common_runtime_test.cc
index 5df7d41..0ed8b63 100644
--- a/runtime/common_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -19,7 +19,7 @@
 
 int main(int argc, char **argv) {
   art::InitLogging(argv);
-  LOG(INFO) << "Running main() from common_test.cc...";
+  LOG(INFO) << "Running main() from common_runtime_test.cc...";
   testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
new file mode 100644
index 0000000..e2ecf4b
--- /dev/null
+++ b/runtime/common_runtime_test.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_COMMON_RUNTIME_TEST_H_
+#define ART_RUNTIME_COMMON_RUNTIME_TEST_H_
+
+#include <dirent.h>
+#include <dlfcn.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fstream>
+
+#include "../../external/icu4c/common/unicode/uvernum.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/stringprintf.h"
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "compiler_callbacks.h"
+#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "gc/heap.h"
+#include "gtest/gtest.h"
+#include "instruction_set.h"
+#include "interpreter/interpreter.h"
+#include "mirror/class_loader.h"
+#include "oat_file.h"
+#include "object_utils.h"
+#include "os.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "thread.h"
+#include "utils.h"
+#include "UniquePtr.h"
+#include "verifier/method_verifier.h"
+#include "verifier/method_verifier-inl.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+class ScratchFile {
+ public:
+  ScratchFile() {
+    filename_ = getenv("ANDROID_DATA");
+    filename_ += "/TmpFile-XXXXXX";
+    int fd = mkstemp(&filename_[0]);
+    CHECK_NE(-1, fd);
+    file_.reset(new File(fd, GetFilename()));
+  }
+
+  ~ScratchFile() {
+    int unlink_result = unlink(filename_.c_str());
+    CHECK_EQ(0, unlink_result);
+  }
+
+  const std::string& GetFilename() const {
+    return filename_;
+  }
+
+  File* GetFile() const {
+    return file_.get();
+  }
+
+  int GetFd() const {
+    return file_->Fd();
+  }
+
+ private:
+  std::string filename_;
+  UniquePtr<File> file_;
+};
+
+class NoopCompilerCallbacks : public CompilerCallbacks {
+ public:
+  NoopCompilerCallbacks() {}
+  virtual ~NoopCompilerCallbacks() {}
+  virtual bool MethodVerified(verifier::MethodVerifier* verifier) {
+    return true;
+  }
+  virtual void ClassRejected(ClassReference ref) {}
+};
+
+class CommonRuntimeTest : public testing::Test {
+ public:
+  static void SetEnvironmentVariables(std::string& android_data) {
+    if (IsHost()) {
+      // $ANDROID_ROOT is set on the device, but not on the host.
+      // We need to set this so that icu4c can find its locale data.
+      std::string root;
+      const char* android_build_top = getenv("ANDROID_BUILD_TOP");
+      if (android_build_top != nullptr) {
+        root += android_build_top;
+      } else {
+        // Not set by build server, so default to current directory
+        char* cwd = getcwd(nullptr, 0);
+        setenv("ANDROID_BUILD_TOP", cwd, 1);
+        root += cwd;
+        free(cwd);
+      }
+#if defined(__linux__)
+      root += "/out/host/linux-x86";
+#elif defined(__APPLE__)
+      root += "/out/host/darwin-x86";
+#else
+#error unsupported OS
+#endif
+      setenv("ANDROID_ROOT", root.c_str(), 1);
+      setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
+
+      // Not set by build server, so default
+      if (getenv("ANDROID_HOST_OUT") == nullptr) {
+        setenv("ANDROID_HOST_OUT", root.c_str(), 1);
+      }
+    }
+
+    // On target, cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
+    android_data = (IsHost() ? "/tmp/art-data-XXXXXX" : "/data/dalvik-cache/art-data-XXXXXX");
+    if (mkdtemp(&android_data[0]) == nullptr) {
+      PLOG(FATAL) << "mkdtemp(\"" << &android_data[0] << "\") failed";
+    }
+    setenv("ANDROID_DATA", android_data.c_str(), 1);
+  }
+
+ protected:
+  static bool IsHost() {
+    return !kIsTargetBuild;
+  }
+
+  virtual void SetUp() {
+    SetEnvironmentVariables(android_data_);
+    dalvik_cache_.append(android_data_.c_str());
+    dalvik_cache_.append("/dalvik-cache");
+    int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
+    ASSERT_EQ(mkdir_result, 0);
+
+    std::string error_msg;
+    java_lang_dex_file_ = DexFile::Open(GetLibCoreDexFileName().c_str(),
+                                        GetLibCoreDexFileName().c_str(), &error_msg);
+    if (java_lang_dex_file_ == nullptr) {
+      LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "': "
+          << error_msg << "\n";
+    }
+    boot_class_path_.push_back(java_lang_dex_file_);
+
+    std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
+    std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
+
+    Runtime::Options options;
+    options.push_back(std::make_pair("bootclasspath", &boot_class_path_));
+    options.push_back(std::make_pair("-Xcheck:jni", nullptr));
+    options.push_back(std::make_pair(min_heap_string.c_str(), nullptr));
+    options.push_back(std::make_pair(max_heap_string.c_str(), nullptr));
+    options.push_back(std::make_pair("compilercallbacks", &callbacks_));
+    SetUpRuntimeOptions(&options);
+    if (!Runtime::Create(options, false)) {
+      LOG(FATAL) << "Failed to create runtime";
+      return;
+    }
+    runtime_.reset(Runtime::Current());
+    class_linker_ = runtime_->GetClassLinker();
+    class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
+
+    // Runtime::Create acquired the mutator_lock_ that is normally given away when we
+    // Runtime::Start, give it away now and then switch to a more manageable ScopedObjectAccess.
+    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+
+    // We're back in native, take the opportunity to initialize well known classes.
+    WellKnownClasses::Init(Thread::Current()->GetJniEnv());
+
+    // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
+    // pool is created by the runtime.
+    runtime_->GetHeap()->CreateThreadPool();
+    runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
+  }
+
+  // Allow subclasses such as CommonCompilerTest to add extra options.
+  virtual void SetUpRuntimeOptions(Runtime::Options *options) {}
+
+  virtual void TearDown() {
+    const char* android_data = getenv("ANDROID_DATA");
+    ASSERT_TRUE(android_data != nullptr);
+    DIR* dir = opendir(dalvik_cache_.c_str());
+    ASSERT_TRUE(dir != nullptr);
+    dirent* e;
+    while ((e = readdir(dir)) != nullptr) {
+      if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
+        continue;
+      }
+      std::string filename(dalvik_cache_);
+      filename.push_back('/');
+      filename.append(e->d_name);
+      int unlink_result = unlink(filename.c_str());
+      ASSERT_EQ(0, unlink_result);
+    }
+    closedir(dir);
+    int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
+    ASSERT_EQ(0, rmdir_cache_result);
+    int rmdir_data_result = rmdir(android_data_.c_str());
+    ASSERT_EQ(0, rmdir_data_result);
+
+    // icu4c has a fixed 10-element array "gCommonICUDataArray".
+    // If we run > 10 tests, we fill that array and u_setCommonData fails.
+    // There's a function to clear the array, but it's not public...
+    typedef void (*IcuCleanupFn)();
+    void* sym = dlsym(RTLD_DEFAULT, "u_cleanup_" U_ICU_VERSION_SHORT);
+    CHECK(sym != nullptr);
+    IcuCleanupFn icu_cleanup_fn = reinterpret_cast<IcuCleanupFn>(sym);
+    (*icu_cleanup_fn)();
+
+    STLDeleteElements(&opened_dex_files_);
+
+    Runtime::Current()->GetHeap()->VerifyHeap();  // Check for heap corruption after the test
+  }
+
+  std::string GetLibCoreDexFileName() {
+    return GetDexFileName("core-libart");
+  }
+
+  std::string GetDexFileName(const std::string& jar_prefix) {
+    if (IsHost()) {
+      const char* host_dir = getenv("ANDROID_HOST_OUT");
+      CHECK(host_dir != nullptr);
+      return StringPrintf("%s/framework/%s-hostdex.jar", host_dir, jar_prefix.c_str());
+    }
+    return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
+  }
+
+  std::string GetTestAndroidRoot() {
+    if (IsHost()) {
+      const char* host_dir = getenv("ANDROID_HOST_OUT");
+      CHECK(host_dir != nullptr);
+      return host_dir;
+    }
+    return GetAndroidRoot();
+  }
+
+  const DexFile* OpenTestDexFile(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(name != nullptr);
+    std::string filename;
+    if (IsHost()) {
+      filename += getenv("ANDROID_HOST_OUT");
+      filename += "/framework/";
+    } else {
+      filename += "/data/nativetest/art/";
+    }
+    filename += "art-test-dex-";
+    filename += name;
+    filename += ".jar";
+    std::string error_msg;
+    const DexFile* dex_file = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg);
+    CHECK(dex_file != nullptr) << "Failed to open '" << filename << "': " << error_msg;
+    CHECK_EQ(PROT_READ, dex_file->GetPermissions());
+    CHECK(dex_file->IsReadOnly());
+    opened_dex_files_.push_back(dex_file);
+    return dex_file;
+  }
+
+  jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    const DexFile* dex_file = OpenTestDexFile(dex_name);
+    CHECK(dex_file != nullptr);
+    class_linker_->RegisterDexFile(*dex_file);
+    std::vector<const DexFile*> class_path;
+    class_path.push_back(dex_file);
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    ScopedLocalRef<jobject> class_loader_local(soa.Env(),
+        soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
+    jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
+    soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
+    Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path);
+    return class_loader;
+  }
+
+  std::string android_data_;
+  std::string dalvik_cache_;
+  const DexFile* java_lang_dex_file_;  // owned by runtime_
+  std::vector<const DexFile*> boot_class_path_;
+  UniquePtr<Runtime> runtime_;
+  // Owned by the runtime
+  ClassLinker* class_linker_;
+
+ private:
+  NoopCompilerCallbacks callbacks_;
+  std::vector<const DexFile*> opened_dex_files_;
+};
+
+// Sets a CheckJni abort hook to catch failures. Note that this will cause CheckJNI to carry on
+// rather than aborting, so be careful!
+class CheckJniAbortCatcher {
+ public:
+  CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
+    vm_->check_jni_abort_hook = Hook;
+    vm_->check_jni_abort_hook_data = &actual_;
+  }
+
+  ~CheckJniAbortCatcher() {
+    vm_->check_jni_abort_hook = nullptr;
+    vm_->check_jni_abort_hook_data = nullptr;
+    EXPECT_TRUE(actual_.empty()) << actual_;
+  }
+
+  void Check(const char* expected_text) {
+    EXPECT_TRUE(actual_.find(expected_text) != std::string::npos) << "\n"
+        << "Expected to find: " << expected_text << "\n"
+        << "In the output   : " << actual_;
+    actual_.clear();
+  }
+
+ private:
+  static void Hook(void* data, const std::string& reason) {
+    // We use += because when we're hooking the aborts like this, multiple problems can be found.
+    *reinterpret_cast<std::string*>(data) += reason;
+  }
+
+  JavaVMExt* vm_;
+  std::string actual_;
+
+  DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
+};
+
+// TODO: These tests were disabled for portable when we went to having
+// MCLinker link LLVM ELF output because we no longer just have code
+// blobs in memory. We'll need to dlopen to load and relocate
+// temporary output to resurrect these tests.
+#define TEST_DISABLED_FOR_PORTABLE() \
+  if (kUsePortableCompiler) { \
+    printf("WARNING: TEST DISABLED FOR PORTABLE\n"); \
+    return; \
+  }
+
+}  // namespace art
+
+namespace std {
+
+// TODO: isn't gtest supposed to be able to print STL types for itself?
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& rhs) {
+  os << ::art::ToString(rhs);
+  return os;
+}
+
+}  // namespace std
+
+#endif  // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/common_test.h b/runtime/common_test.h
deleted file mode 100644
index 9eaec46..0000000
--- a/runtime/common_test.h
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_COMMON_TEST_H_
-#define ART_RUNTIME_COMMON_TEST_H_
-
-#include <dirent.h>
-#include <dlfcn.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <fstream>
-
-#include "../../external/icu4c/common/unicode/uvernum.h"
-#include "../compiler/compiler_backend.h"
-#include "../compiler/dex/quick/dex_file_to_method_inliner_map.h"
-#include "../compiler/dex/verification_results.h"
-#include "../compiler/driver/compiler_callbacks_impl.h"
-#include "../compiler/driver/compiler_driver.h"
-#include "../compiler/driver/compiler_options.h"
-#include "base/macros.h"
-#include "base/stl_util.h"
-#include "base/stringprintf.h"
-#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
-#include "compiler_callbacks.h"
-#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "gc/heap.h"
-#include "gtest/gtest.h"
-#include "instruction_set.h"
-#include "interpreter/interpreter.h"
-#include "mirror/class_loader.h"
-#include "oat_file.h"
-#include "object_utils.h"
-#include "os.h"
-#include "runtime.h"
-#include "scoped_thread_state_change.h"
-#include "ScopedLocalRef.h"
-#include "thread.h"
-#include "utils.h"
-#include "UniquePtr.h"
-#include "verifier/method_verifier.h"
-#include "verifier/method_verifier-inl.h"
-#include "well_known_classes.h"
-
-namespace art {
-
-static const byte kBase64Map[256] = {
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255,  62, 255, 255, 255,  63,
-  52,  53,  54,  55,  56,  57,  58,  59,  60,  61, 255, 255,
-  255, 254, 255, 255, 255,   0,   1,   2,   3,   4,   5,   6,
-    7,   8,   9,  10,  11,  12,  13,  14,  15,  16,  17,  18,  // NOLINT
-   19,  20,  21,  22,  23,  24,  25, 255, 255, 255, 255, 255,  // NOLINT
-  255,  26,  27,  28,  29,  30,  31,  32,  33,  34,  35,  36,
-   37,  38,  39,  40,  41,  42,  43,  44,  45,  46,  47,  48,  // NOLINT
-   49,  50,  51, 255, 255, 255, 255, 255, 255, 255, 255, 255,  // NOLINT
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255
-};
-
-byte* DecodeBase64(const char* src, size_t* dst_size) {
-  std::vector<byte> tmp;
-  uint32_t t = 0, y = 0;
-  int g = 3;
-  for (size_t i = 0; src[i] != '\0'; ++i) {
-    byte c = kBase64Map[src[i] & 0xFF];
-    if (c == 255) continue;
-    // the final = symbols are read and used to trim the remaining bytes
-    if (c == 254) {
-      c = 0;
-      // prevent g < 0 which would potentially allow an overflow later
-      if (--g < 0) {
-        *dst_size = 0;
-        return NULL;
-      }
-    } else if (g != 3) {
-      // we only allow = to be at the end
-      *dst_size = 0;
-      return NULL;
-    }
-    t = (t << 6) | c;
-    if (++y == 4) {
-      tmp.push_back((t >> 16) & 255);
-      if (g > 1) {
-        tmp.push_back((t >> 8) & 255);
-      }
-      if (g > 2) {
-        tmp.push_back(t & 255);
-      }
-      y = t = 0;
-    }
-  }
-  if (y != 0) {
-    *dst_size = 0;
-    return NULL;
-  }
-  UniquePtr<byte[]> dst(new byte[tmp.size()]);
-  if (dst_size != NULL) {
-    *dst_size = tmp.size();
-  } else {
-    *dst_size = 0;
-  }
-  std::copy(tmp.begin(), tmp.end(), dst.get());
-  return dst.release();
-}
-
-class ScratchFile {
- public:
-  ScratchFile() {
-    filename_ = getenv("ANDROID_DATA");
-    filename_ += "/TmpFile-XXXXXX";
-    int fd = mkstemp(&filename_[0]);
-    CHECK_NE(-1, fd);
-    file_.reset(new File(fd, GetFilename()));
-  }
-
-  ~ScratchFile() {
-    int unlink_result = unlink(filename_.c_str());
-    CHECK_EQ(0, unlink_result);
-  }
-
-  const std::string& GetFilename() const {
-    return filename_;
-  }
-
-  File* GetFile() const {
-    return file_.get();
-  }
-
-  int GetFd() const {
-    return file_->Fd();
-  }
-
- private:
-  std::string filename_;
-  UniquePtr<File> file_;
-};
-
-#if defined(__arm__)
-
-#include <sys/ucontext.h>
-
-// A signal handler called when have an illegal instruction.  We record the fact in
-// a global boolean and then increment the PC in the signal context to return to
-// the next instruction.  We know the instruction is an sdiv (4 bytes long).
-static void baddivideinst(int signo, siginfo *si, void *data) {
-  (void)signo;
-  (void)si;
-  struct ucontext *uc = (struct ucontext *)data;
-  struct sigcontext *sc = &uc->uc_mcontext;
-  sc->arm_r0 = 0;     // set R0 to #0 to signal error
-  sc->arm_pc += 4;    // skip offending instruction
-}
-
-// This is in arch/arm/arm_sdiv.S.  It does the following:
-// mov r1,#1
-// sdiv r0,r1,r1
-// bx lr
-//
-// the result will be the value 1 if sdiv is supported.  If it is not supported
-// a SIGILL signal will be raised and the signal handler (baddivideinst) called.
-// The signal handler sets r0 to #0 and then increments pc beyond the failed instruction.
-// Thus if the instruction is not supported, the result of this function will be #0
-
-extern "C" bool CheckForARMSDIVInstruction();
-
-static InstructionSetFeatures GuessInstructionFeatures() {
-  InstructionSetFeatures f;
-
-  // Uncomment this for processing of /proc/cpuinfo.
-  if (false) {
-    // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
-    // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
-    std::ifstream in("/proc/cpuinfo");
-    if (in) {
-      while (!in.eof()) {
-        std::string line;
-        std::getline(in, line);
-        if (!in.eof()) {
-          if (line.find("Features") != std::string::npos) {
-            if (line.find("idivt") != std::string::npos) {
-              f.SetHasDivideInstruction(true);
-            }
-          }
-        }
-        in.close();
-      }
-    } else {
-      LOG(INFO) << "Failed to open /proc/cpuinfo";
-    }
-  }
-
-  // See if have a sdiv instruction.  Register a signal handler and try to execute
-  // an sdiv instruction.  If we get a SIGILL then it's not supported.  We can't use
-  // the /proc/cpuinfo method for this because Krait devices don't always put the idivt
-  // feature in the list.
-  struct sigaction sa, osa;
-  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
-  sa.sa_sigaction = baddivideinst;
-  sigaction(SIGILL, &sa, &osa);
-
-  if (CheckForARMSDIVInstruction()) {
-    f.SetHasDivideInstruction(true);
-  }
-
-  // Restore the signal handler.
-  sigaction(SIGILL, &osa, NULL);
-
-  // Other feature guesses in here.
-  return f;
-}
-
-#endif
-
-// Given a set of instruction features from the build, parse it.  The
-// input 'str' is a comma separated list of feature names.  Parse it and
-// return the InstructionSetFeatures object.
-static InstructionSetFeatures ParseFeatureList(std::string str) {
-  InstructionSetFeatures result;
-  typedef std::vector<std::string> FeatureList;
-  FeatureList features;
-  Split(str, ',', features);
-  for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
-    std::string feature = Trim(*i);
-    if (feature == "default") {
-      // Nothing to do.
-    } else if (feature == "div") {
-      // Supports divide instruction.
-      result.SetHasDivideInstruction(true);
-    } else if (feature == "nodiv") {
-      // Turn off support for divide instruction.
-      result.SetHasDivideInstruction(false);
-    } else {
-      LOG(FATAL) << "Unknown instruction set feature: '" << feature << "'";
-    }
-  }
-  // Others...
-  return result;
-}
-
-class CommonTest : public testing::Test {
- public:
-  static void MakeExecutable(const std::vector<uint8_t>& code) {
-    CHECK_NE(code.size(), 0U);
-    MakeExecutable(&code[0], code.size());
-  }
-
-  // Create an OatMethod based on pointers (for unit tests).
-  OatFile::OatMethod CreateOatMethod(const void* code,
-                                     const size_t frame_size_in_bytes,
-                                     const uint32_t core_spill_mask,
-                                     const uint32_t fp_spill_mask,
-                                     const uint8_t* mapping_table,
-                                     const uint8_t* vmap_table,
-                                     const uint8_t* gc_map) {
-    const byte* base;
-    uint32_t code_offset, mapping_table_offset, vmap_table_offset, gc_map_offset;
-    if (mapping_table == nullptr && vmap_table == nullptr && gc_map == nullptr) {
-      base = reinterpret_cast<const byte*>(code);  // Base of data points at code.
-      base -= kPointerSize;  // Move backward so that code_offset != 0.
-      code_offset = kPointerSize;
-      mapping_table_offset = 0;
-      vmap_table_offset = 0;
-      gc_map_offset = 0;
-    } else {
-      // TODO: 64bit support.
-      base = nullptr;  // Base of data in oat file, ie 0.
-      code_offset = PointerToLowMemUInt32(code);
-      mapping_table_offset = PointerToLowMemUInt32(mapping_table);
-      vmap_table_offset = PointerToLowMemUInt32(vmap_table);
-      gc_map_offset = PointerToLowMemUInt32(gc_map);
-    }
-    return OatFile::OatMethod(base,
-                              code_offset,
-                              frame_size_in_bytes,
-                              core_spill_mask,
-                              fp_spill_mask,
-                              mapping_table_offset,
-                              vmap_table_offset,
-                              gc_map_offset);
-  }
-
-  void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(method != nullptr);
-
-    const CompiledMethod* compiled_method = nullptr;
-    if (!method->IsAbstract()) {
-      mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
-      const DexFile& dex_file = *dex_cache->GetDexFile();
-      compiled_method =
-          compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
-                                                              method->GetDexMethodIndex()));
-    }
-    if (compiled_method != nullptr) {
-      const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
-      if (code == nullptr) {
-        code = compiled_method->GetPortableCode();
-      }
-      MakeExecutable(*code);
-      const void* method_code = CompiledMethod::CodePointer(&(*code)[0],
-                                                            compiled_method->GetInstructionSet());
-      LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                      compiled_method->GetFrameSizeInBytes(),
-                                                      compiled_method->GetCoreSpillMask(),
-                                                      compiled_method->GetFpSpillMask(),
-                                                      &compiled_method->GetMappingTable()[0],
-                                                      &compiled_method->GetVmapTable()[0],
-                                                      NULL);
-      oat_method.LinkMethod(method);
-      method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
-    } else {
-      // No code? You must mean to go into the interpreter.
-      const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
-                                                     : GetQuickToInterpreterBridge();
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                      kStackAlignment,
-                                                      0,
-                                                      0,
-                                                      NULL,
-                                                      NULL,
-                                                      NULL);
-      oat_method.LinkMethod(method);
-      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
-    }
-    // Create bridges to transition between different kinds of compiled bridge.
-    if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
-      method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
-    } else {
-      CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
-      method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
-      method->SetIsPortableCompiled();
-    }
-  }
-
-  static void MakeExecutable(const void* code_start, size_t code_length) {
-    CHECK(code_start != NULL);
-    CHECK_NE(code_length, 0U);
-    uintptr_t data = reinterpret_cast<uintptr_t>(code_start);
-    uintptr_t base = RoundDown(data, kPageSize);
-    uintptr_t limit = RoundUp(data + code_length, kPageSize);
-    uintptr_t len = limit - base;
-    int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
-    CHECK_EQ(result, 0);
-
-    // Flush instruction cache
-    // Only uses __builtin___clear_cache if GCC >= 4.3.3
-#if GCC_VERSION >= 40303
-    __builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
-#else
-    LOG(FATAL) << "UNIMPLEMENTED: cache flush";
-#endif
-  }
-
-  static void SetEnvironmentVariables(std::string& android_data) {
-    if (IsHost()) {
-      // $ANDROID_ROOT is set on the device, but not on the host.
-      // We need to set this so that icu4c can find its locale data.
-      std::string root;
-      const char* android_build_top = getenv("ANDROID_BUILD_TOP");
-      if (android_build_top != nullptr) {
-        root += android_build_top;
-      } else {
-        // Not set by build server, so default to current directory
-        char* cwd = getcwd(nullptr, 0);
-        setenv("ANDROID_BUILD_TOP", cwd, 1);
-        root += cwd;
-        free(cwd);
-      }
-#if defined(__linux__)
-      root += "/out/host/linux-x86";
-#elif defined(__APPLE__)
-      root += "/out/host/darwin-x86";
-#else
-#error unsupported OS
-#endif
-      setenv("ANDROID_ROOT", root.c_str(), 1);
-      setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
-
-      // Not set by build server, so default
-      if (getenv("ANDROID_HOST_OUT") == nullptr) {
-        setenv("ANDROID_HOST_OUT", root.c_str(), 1);
-      }
-    }
-
-    // On target, Cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
-    android_data = (IsHost() ? "/tmp/art-data-XXXXXX" : "/data/dalvik-cache/art-data-XXXXXX");
-    if (mkdtemp(&android_data[0]) == NULL) {
-      PLOG(FATAL) << "mkdtemp(\"" << &android_data[0] << "\") failed";
-    }
-    setenv("ANDROID_DATA", android_data.c_str(), 1);
-  }
-
-  void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    SirtRef<mirror::ClassLoader> loader(self, class_loader);
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
-      MakeExecutable(klass->GetDirectMethod(i));
-    }
-    for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
-      MakeExecutable(klass->GetVirtualMethod(i));
-    }
-  }
-
- protected:
-  static bool IsHost() {
-    return !kIsTargetBuild;
-  }
-
-  virtual void SetUp() {
-    SetEnvironmentVariables(android_data_);
-    dalvik_cache_.append(android_data_.c_str());
-    dalvik_cache_.append("/dalvik-cache");
-    int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
-    ASSERT_EQ(mkdir_result, 0);
-
-    std::string error_msg;
-    java_lang_dex_file_ = DexFile::Open(GetLibCoreDexFileName().c_str(),
-                                        GetLibCoreDexFileName().c_str(), &error_msg);
-    if (java_lang_dex_file_ == NULL) {
-      LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "': "
-          << error_msg << "\n";
-    }
-    boot_class_path_.push_back(java_lang_dex_file_);
-
-    std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
-    std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
-
-    // TODO: make selectable
-    CompilerBackend::Kind compiler_backend = kUsePortableCompiler
-        ? CompilerBackend::kPortable
-        : CompilerBackend::kQuick;
-
-    compiler_options_.reset(new CompilerOptions);
-    verification_results_.reset(new VerificationResults(compiler_options_.get()));
-    method_inliner_map_.reset(new DexFileToMethodInlinerMap);
-    callbacks_.reset(new CompilerCallbacksImpl(verification_results_.get(),
-                                               method_inliner_map_.get()));
-    Runtime::Options options;
-    options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
-    options.push_back(std::make_pair("bootclasspath", &boot_class_path_));
-    options.push_back(std::make_pair("-Xcheck:jni", reinterpret_cast<void*>(NULL)));
-    options.push_back(std::make_pair(min_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
-    options.push_back(std::make_pair(max_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
-    if (!Runtime::Create(options, false)) {
-      LOG(FATAL) << "Failed to create runtime";
-      return;
-    }
-    runtime_.reset(Runtime::Current());
-    // Runtime::Create acquired the mutator_lock_ that is normally given away when we
-    // Runtime::Start, give it away now and then switch to a more managable ScopedObjectAccess.
-    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-    {
-      ScopedObjectAccess soa(Thread::Current());
-      ASSERT_TRUE(runtime_.get() != NULL);
-      class_linker_ = runtime_->GetClassLinker();
-
-      InstructionSet instruction_set = kNone;
-
-      // Take the default set of instruction features from the build.
-      InstructionSetFeatures instruction_set_features =
-          ParseFeatureList(STRINGIFY(ART_DEFAULT_INSTRUCTION_SET_FEATURES));
-
-#if defined(__arm__)
-      instruction_set = kThumb2;
-      InstructionSetFeatures runtime_features = GuessInstructionFeatures();
-
-      // for ARM, do a runtime check to make sure that the features we are passed from
-      // the build match the features we actually determine at runtime.
-      ASSERT_EQ(instruction_set_features, runtime_features);
-#elif defined(__mips__)
-      instruction_set = kMips;
-#elif defined(__i386__)
-      instruction_set = kX86;
-#elif defined(__x86_64__)
-      instruction_set = kX86_64;
-      // TODO: x86_64 compilation support.
-      runtime_->SetCompilerFilter(Runtime::kInterpretOnly);
-#endif
-
-      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
-        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
-        if (!runtime_->HasCalleeSaveMethod(type)) {
-          runtime_->SetCalleeSaveMethod(
-              runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
-        }
-      }
-      class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
-      timer_.reset(new CumulativeLogger("Compilation times"));
-      compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
-                                                verification_results_.get(),
-                                                method_inliner_map_.get(),
-                                                compiler_backend, instruction_set,
-                                                instruction_set_features,
-                                                true, new CompilerDriver::DescriptorSet,
-                                                2, true, true, timer_.get()));
-    }
-    // We typically don't generate an image in unit tests, disable this optimization by default.
-    compiler_driver_->SetSupportBootImageFixup(false);
-
-    // We're back in native, take the opportunity to initialize well known classes.
-    WellKnownClasses::Init(Thread::Current()->GetJniEnv());
-    // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
-    // pool is created by the runtime.
-    runtime_->GetHeap()->CreateThreadPool();
-    runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
-  }
-
-  virtual void TearDown() {
-    const char* android_data = getenv("ANDROID_DATA");
-    ASSERT_TRUE(android_data != NULL);
-    DIR* dir = opendir(dalvik_cache_.c_str());
-    ASSERT_TRUE(dir != NULL);
-    dirent* e;
-    while ((e = readdir(dir)) != NULL) {
-      if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
-        continue;
-      }
-      std::string filename(dalvik_cache_);
-      filename.push_back('/');
-      filename.append(e->d_name);
-      int unlink_result = unlink(filename.c_str());
-      ASSERT_EQ(0, unlink_result);
-    }
-    closedir(dir);
-    int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
-    ASSERT_EQ(0, rmdir_cache_result);
-    int rmdir_data_result = rmdir(android_data_.c_str());
-    ASSERT_EQ(0, rmdir_data_result);
-
-    // icu4c has a fixed 10-element array "gCommonICUDataArray".
-    // If we run > 10 tests, we fill that array and u_setCommonData fails.
-    // There's a function to clear the array, but it's not public...
-    typedef void (*IcuCleanupFn)();
-    void* sym = dlsym(RTLD_DEFAULT, "u_cleanup_" U_ICU_VERSION_SHORT);
-    CHECK(sym != NULL);
-    IcuCleanupFn icu_cleanup_fn = reinterpret_cast<IcuCleanupFn>(sym);
-    (*icu_cleanup_fn)();
-
-    compiler_driver_.reset();
-    timer_.reset();
-    callbacks_.reset();
-    method_inliner_map_.reset();
-    verification_results_.reset();
-    compiler_options_.reset();
-    STLDeleteElements(&opened_dex_files_);
-
-    Runtime::Current()->GetHeap()->VerifyHeap();  // Check for heap corruption after the test
-  }
-
-  std::string GetLibCoreDexFileName() {
-    return GetDexFileName("core-libart");
-  }
-
-  std::string GetDexFileName(const std::string& jar_prefix) {
-    if (IsHost()) {
-      const char* host_dir = getenv("ANDROID_HOST_OUT");
-      CHECK(host_dir != NULL);
-      return StringPrintf("%s/framework/%s-hostdex.jar", host_dir, jar_prefix.c_str());
-    }
-    return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
-  }
-
-  std::string GetTestAndroidRoot() {
-    if (IsHost()) {
-      const char* host_dir = getenv("ANDROID_HOST_OUT");
-      CHECK(host_dir != NULL);
-      return host_dir;
-    }
-    return GetAndroidRoot();
-  }
-
-  const DexFile* OpenTestDexFile(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(name != NULL);
-    std::string filename;
-    if (IsHost()) {
-      filename += getenv("ANDROID_HOST_OUT");
-      filename += "/framework/";
-    } else {
-      filename += "/data/nativetest/art/";
-    }
-    filename += "art-test-dex-";
-    filename += name;
-    filename += ".jar";
-    std::string error_msg;
-    const DexFile* dex_file = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg);
-    CHECK(dex_file != NULL) << "Failed to open '" << filename << "': " << error_msg;
-    CHECK_EQ(PROT_READ, dex_file->GetPermissions());
-    CHECK(dex_file->IsReadOnly());
-    opened_dex_files_.push_back(dex_file);
-    return dex_file;
-  }
-
-  jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    const DexFile* dex_file = OpenTestDexFile(dex_name);
-    CHECK(dex_file != NULL);
-    class_linker_->RegisterDexFile(*dex_file);
-    std::vector<const DexFile*> class_path;
-    class_path.push_back(dex_file);
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    ScopedLocalRef<jobject> class_loader_local(soa.Env(),
-        soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
-    jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
-    soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
-    Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path);
-    return class_loader;
-  }
-
-  void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    SirtRef<mirror::ClassLoader> loader(self, class_loader);
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
-      CompileMethod(klass->GetDirectMethod(i));
-    }
-    for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
-      CompileMethod(klass->GetVirtualMethod(i));
-    }
-  }
-
-  void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(method != NULL);
-    TimingLogger timings("CommonTest::CompileMethod", false, false);
-    timings.StartSplit("CompileOne");
-    compiler_driver_->CompileOne(method, timings);
-    MakeExecutable(method);
-    timings.EndSplit();
-  }
-
-  void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
-                           const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature);
-    CHECK(method != NULL) << "Direct method not found: "
-                          << class_name << "." << method_name << signature;
-    CompileMethod(method);
-  }
-
-  void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
-                            const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
-    CHECK(method != NULL) << "Virtual method not found: "
-                          << class_name << "." << method_name << signature;
-    CompileMethod(method);
-  }
-
-  void ReserveImageSpace() {
-    // Reserve where the image will be loaded up front so that other parts of test set up don't
-    // accidentally end up colliding with the fixed memory address when we need to load the image.
-    std::string error_msg;
-    image_reservation_.reset(MemMap::MapAnonymous("image reservation",
-                                                  reinterpret_cast<byte*>(ART_BASE_ADDRESS),
-                                                  (size_t)100 * 1024 * 1024,  // 100MB
-                                                  PROT_NONE,
-                                                  false /* no need for 4gb flag with fixed mmap*/,
-                                                  &error_msg));
-    CHECK(image_reservation_.get() != nullptr) << error_msg;
-  }
-
-  void UnreserveImageSpace() {
-    image_reservation_.reset();
-  }
-
-  std::string android_data_;
-  std::string dalvik_cache_;
-  const DexFile* java_lang_dex_file_;  // owned by runtime_
-  std::vector<const DexFile*> boot_class_path_;
-  UniquePtr<Runtime> runtime_;
-  // Owned by the runtime
-  ClassLinker* class_linker_;
-  UniquePtr<CompilerOptions> compiler_options_;
-  UniquePtr<VerificationResults> verification_results_;
-  UniquePtr<DexFileToMethodInlinerMap> method_inliner_map_;
-  UniquePtr<CompilerCallbacksImpl> callbacks_;
-  UniquePtr<CompilerDriver> compiler_driver_;
-  UniquePtr<CumulativeLogger> timer_;
-
- private:
-  std::vector<const DexFile*> opened_dex_files_;
-  UniquePtr<MemMap> image_reservation_;
-};
-
-// Sets a CheckJni abort hook to catch failures. Note that this will cause CheckJNI to carry on
-// rather than aborting, so be careful!
-class CheckJniAbortCatcher {
- public:
-  CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
-    vm_->check_jni_abort_hook = Hook;
-    vm_->check_jni_abort_hook_data = &actual_;
-  }
-
-  ~CheckJniAbortCatcher() {
-    vm_->check_jni_abort_hook = NULL;
-    vm_->check_jni_abort_hook_data = NULL;
-    EXPECT_TRUE(actual_.empty()) << actual_;
-  }
-
-  void Check(const char* expected_text) {
-    EXPECT_TRUE(actual_.find(expected_text) != std::string::npos) << "\n"
-        << "Expected to find: " << expected_text << "\n"
-        << "In the output   : " << actual_;
-    actual_.clear();
-  }
-
- private:
-  static void Hook(void* data, const std::string& reason) {
-    // We use += because when we're hooking the aborts like this, multiple problems can be found.
-    *reinterpret_cast<std::string*>(data) += reason;
-  }
-
-  JavaVMExt* vm_;
-  std::string actual_;
-
-  DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
-};
-
-// TODO: These tests were disabled for portable when we went to having
-// MCLinker link LLVM ELF output because we no longer just have code
-// blobs in memory. We'll need to dlopen to load and relocate
-// temporary output to resurrect these tests.
-#define TEST_DISABLED_FOR_PORTABLE() \
-  if (kUsePortableCompiler) { \
-    printf("WARNING: TEST DISABLED FOR PORTABLE\n"); \
-    return; \
-  }
-
-}  // namespace art
-
-namespace std {
-
-// TODO: isn't gtest supposed to be able to print STL types for itself?
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const std::vector<T>& rhs) {
-  os << ::art::ToString(rhs);
-  return os;
-}
-
-}  // namespace std
-
-#endif  // ART_RUNTIME_COMMON_TEST_H_
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 9f09709..3b4e9c7 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1202,7 +1202,9 @@
   if (c == NULL) {
     return status;
   }
-  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length));
+  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
+                                                        c->GetComponentSize(),
+                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
   return JDWP::ERR_NONE;
 }
 
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 543a7b0..9b6859a 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -17,11 +17,11 @@
 #include "dex_file.h"
 
 #include "UniquePtr.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class DexFileTest : public CommonTest {};
+class DexFileTest : public CommonRuntimeTest {};
 
 TEST_F(DexFileTest, Open) {
   ScopedObjectAccess soa(Thread::Current());
@@ -29,6 +29,77 @@
   ASSERT_TRUE(dex != NULL);
 }
 
+static const byte kBase64Map[256] = {
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255,  62, 255, 255, 255,  63,
+  52,  53,  54,  55,  56,  57,  58,  59,  60,  61, 255, 255,
+  255, 254, 255, 255, 255,   0,   1,   2,   3,   4,   5,   6,
+    7,   8,   9,  10,  11,  12,  13,  14,  15,  16,  17,  18,  // NOLINT
+   19,  20,  21,  22,  23,  24,  25, 255, 255, 255, 255, 255,  // NOLINT
+  255,  26,  27,  28,  29,  30,  31,  32,  33,  34,  35,  36,
+   37,  38,  39,  40,  41,  42,  43,  44,  45,  46,  47,  48,  // NOLINT
+   49,  50,  51, 255, 255, 255, 255, 255, 255, 255, 255, 255,  // NOLINT
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+  255, 255, 255, 255
+};
+
+static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
+  std::vector<byte> tmp;
+  uint32_t t = 0, y = 0;
+  int g = 3;
+  for (size_t i = 0; src[i] != '\0'; ++i) {
+    byte c = kBase64Map[src[i] & 0xFF];
+    if (c == 255) continue;
+    // the final = symbols are read and used to trim the remaining bytes
+    if (c == 254) {
+      c = 0;
+      // prevent g < 0 which would potentially allow an overflow later
+      if (--g < 0) {
+        *dst_size = 0;
+        return nullptr;
+      }
+    } else if (g != 3) {
+      // we only allow = to be at the end
+      *dst_size = 0;
+      return nullptr;
+    }
+    t = (t << 6) | c;
+    if (++y == 4) {
+      tmp.push_back((t >> 16) & 255);
+      if (g > 1) {
+        tmp.push_back((t >> 8) & 255);
+      }
+      if (g > 2) {
+        tmp.push_back(t & 255);
+      }
+      y = t = 0;
+    }
+  }
+  if (y != 0) {
+    *dst_size = 0;
+    return nullptr;
+  }
+  UniquePtr<byte[]> dst(new byte[tmp.size()]);
+  if (dst_size != nullptr) {
+    *dst_size = tmp.size();
+  } else {
+    *dst_size = 0;
+  }
+  std::copy(tmp.begin(), tmp.end(), dst.get());
+  return dst.release();
+}
+
 // Although this is the same content logically as the Nested test dex,
 // the DexFileHeader test is sensitive to subtle changes in the
 // contents due to the checksum etc, so we embed the exact input here.
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index 2941db6..5e2d89e 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -16,11 +16,11 @@
 
 #include "dex_method_iterator.h"
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class DexMethodIteratorTest : public CommonTest {
+class DexMethodIteratorTest : public CommonRuntimeTest {
  public:
   const DexFile* OpenDexFile(const std::string& partial_filename) {
     std::string dfn = GetDexFileName(partial_filename);
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e4e58b8..3a17e41 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -22,10 +22,10 @@
 
 namespace art {
 
-ElfFile::ElfFile()
-  : file_(NULL),
-    writable_(false),
-    program_header_only_(false),
+ElfFile::ElfFile(File* file, bool writable, bool program_header_only)
+  : file_(file),
+    writable_(writable),
+    program_header_only_(program_header_only),
     header_(NULL),
     base_address_(NULL),
     program_headers_start_(NULL),
@@ -38,23 +38,20 @@
     dynstr_section_start_(NULL),
     hash_section_start_(NULL),
     symtab_symbol_table_(NULL),
-    dynsym_symbol_table_(NULL) {}
+    dynsym_symbol_table_(NULL) {
+  CHECK(file != NULL);
+}
 
 ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only,
                        std::string* error_msg) {
-  UniquePtr<ElfFile> elf_file(new ElfFile());
-  if (!elf_file->Setup(file, writable, program_header_only, error_msg)) {
+  UniquePtr<ElfFile> elf_file(new ElfFile(file, writable, program_header_only));
+  if (!elf_file->Setup(error_msg)) {
     return nullptr;
   }
   return elf_file.release();
 }
 
-bool ElfFile::Setup(File* file, bool writable, bool program_header_only, std::string* error_msg) {
-  CHECK(file != NULL);
-  file_ = file;
-  writable_ = writable;
-  program_header_only_ = program_header_only;
-
+bool ElfFile::Setup(std::string* error_msg) {
   int prot;
   int flags;
   if (writable_) {
@@ -79,7 +76,7 @@
     return false;
   }
 
-  if (program_header_only) {
+  if (program_header_only_) {
     // first just map ELF header to get program header size information
     size_t elf_header_size = sizeof(Elf32_Ehdr);
     if (!SetMap(MemMap::MapFile(elf_header_size, prot, flags, file_->Fd(), 0,
@@ -114,7 +111,7 @@
   // Either way, the program header is relative to the elf header
   program_headers_start_ = Begin() + GetHeader().e_phoff;
 
-  if (!program_header_only) {
+  if (!program_header_only_) {
     // Setup section headers.
     section_headers_start_ = Begin() + GetHeader().e_shoff;
 
@@ -192,7 +189,8 @@
       || (ELFMAG1 != header_->e_ident[EI_MAG1])
       || (ELFMAG2 != header_->e_ident[EI_MAG2])
       || (ELFMAG3 != header_->e_ident[EI_MAG3])) {
-    *error_msg = StringPrintf("Failed to find ELF magic in %s: %c%c%c%c",
+    *error_msg = StringPrintf("Failed to find ELF magic value %d %d %d %d in %s, found %d %d %d %d",
+                              ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3,
                               file_->GetPath().c_str(),
                               header_->e_ident[EI_MAG0],
                               header_->e_ident[EI_MAG1],
@@ -200,61 +198,142 @@
                               header_->e_ident[EI_MAG3]);
     return false;
   }
+  if (ELFCLASS32 != header_->e_ident[EI_CLASS]) {
+    *error_msg = StringPrintf("Failed to find expected EI_CLASS value %d in %s, found %d",
+                              ELFCLASS32,
+                              file_->GetPath().c_str(),
+                              header_->e_ident[EI_CLASS]);
+    return false;
+  }
+  if (ELFDATA2LSB != header_->e_ident[EI_DATA]) {
+    *error_msg = StringPrintf("Failed to find expected EI_DATA value %d in %s, found %d",
+                              ELFDATA2LSB,
+                              file_->GetPath().c_str(),
+                              header_->e_ident[EI_CLASS]);
+    return false;
+  }
+  if (EV_CURRENT != header_->e_ident[EI_VERSION]) {
+    *error_msg = StringPrintf("Failed to find expected EI_VERSION value %d in %s, found %d",
+                              EV_CURRENT,
+                              file_->GetPath().c_str(),
+                              header_->e_ident[EI_CLASS]);
+    return false;
+  }
+  if (ET_DYN != header_->e_type) {
+    *error_msg = StringPrintf("Failed to find expected e_type value %d in %s, found %d",
+                              ET_DYN,
+                              file_->GetPath().c_str(),
+                              header_->e_type);
+    return false;
+  }
+  if (EV_CURRENT != header_->e_version) {
+    *error_msg = StringPrintf("Failed to find expected e_version value %d in %s, found %d",
+                              EV_CURRENT,
+                              file_->GetPath().c_str(),
+                              header_->e_version);
+    return false;
+  }
+  if (0 != header_->e_entry) {
+    *error_msg = StringPrintf("Failed to find expected e_entry value %d in %s, found %d",
+                              0,
+                              file_->GetPath().c_str(),
+                              header_->e_entry);
+    return false;
+  }
+  if (0 == header_->e_phoff) {
+    *error_msg = StringPrintf("Failed to find non-zero e_phoff value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_shoff) {
+    *error_msg = StringPrintf("Failed to find non-zero e_shoff value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_ehsize) {
+    *error_msg = StringPrintf("Failed to find non-zero e_ehsize value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_phentsize) {
+    *error_msg = StringPrintf("Failed to find non-zero e_phentsize value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_phnum) {
+    *error_msg = StringPrintf("Failed to find non-zero e_phnum value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_shentsize) {
+    *error_msg = StringPrintf("Failed to find non-zero e_shentsize value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_shnum) {
+    *error_msg = StringPrintf("Failed to find non-zero e_shnum value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (0 == header_->e_shstrndx) {
+    *error_msg = StringPrintf("Failed to find non-zero e_shstrndx value in %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  if (header_->e_shstrndx >= header_->e_shnum) {
+    *error_msg = StringPrintf("Failed to find e_shnum value %d less than %d in %s",
+                              header_->e_shstrndx,
+                              header_->e_shnum,
+                              file_->GetPath().c_str());
+    return false;
+  }
 
-
-  // TODO: remove these static_casts from enum when using -std=gnu++0x
-  CHECK_EQ(static_cast<unsigned char>(ELFCLASS32),  header_->e_ident[EI_CLASS])   << file_->GetPath();
-  CHECK_EQ(static_cast<unsigned char>(ELFDATA2LSB), header_->e_ident[EI_DATA])    << file_->GetPath();
-  CHECK_EQ(static_cast<unsigned char>(EV_CURRENT),  header_->e_ident[EI_VERSION]) << file_->GetPath();
-
-  // TODO: remove these static_casts from enum when using -std=gnu++0x
-  CHECK_EQ(static_cast<Elf32_Half>(ET_DYN), header_->e_type) << file_->GetPath();
-  CHECK_EQ(static_cast<Elf32_Word>(EV_CURRENT), header_->e_version) << file_->GetPath();
-  CHECK_EQ(0U, header_->e_entry) << file_->GetPath();
-
-  CHECK_NE(0U, header_->e_phoff) << file_->GetPath();
-  CHECK_NE(0U, header_->e_shoff) << file_->GetPath();
-  CHECK_NE(0U, header_->e_ehsize) << file_->GetPath();
-  CHECK_NE(0U, header_->e_phentsize) << file_->GetPath();
-  CHECK_NE(0U, header_->e_phnum) << file_->GetPath();
-  CHECK_NE(0U, header_->e_shentsize) << file_->GetPath();
-  CHECK_NE(0U, header_->e_shnum) << file_->GetPath();
-  CHECK_NE(0U, header_->e_shstrndx) << file_->GetPath();
-  CHECK_GE(header_->e_shnum, header_->e_shstrndx) << file_->GetPath();
   if (!program_header_only_) {
-    CHECK_GT(Size(), header_->e_phoff) << file_->GetPath();
-    CHECK_GT(Size(), header_->e_shoff) << file_->GetPath();
+    if (header_->e_phoff >= Size()) {
+      *error_msg = StringPrintf("Failed to find e_phoff value %d less than %zd in %s",
+                                header_->e_phoff,
+                                Size(),
+                                file_->GetPath().c_str());
+      return false;
+    }
+    if (header_->e_shoff >= Size()) {
+      *error_msg = StringPrintf("Failed to find e_shoff value %d less than %zd in %s",
+                                header_->e_shoff,
+                                Size(),
+                                file_->GetPath().c_str());
+      return false;
+    }
   }
   return true;
 }
 
 
-Elf32_Ehdr& ElfFile::GetHeader() {
+Elf32_Ehdr& ElfFile::GetHeader() const {
   CHECK(header_ != NULL);
   return *header_;
 }
 
-byte* ElfFile::GetProgramHeadersStart() {
+byte* ElfFile::GetProgramHeadersStart() const {
   CHECK(program_headers_start_ != NULL);
   return program_headers_start_;
 }
 
-byte* ElfFile::GetSectionHeadersStart() {
+byte* ElfFile::GetSectionHeadersStart() const {
   CHECK(section_headers_start_ != NULL);
   return section_headers_start_;
 }
 
-Elf32_Phdr& ElfFile::GetDynamicProgramHeader() {
+Elf32_Phdr& ElfFile::GetDynamicProgramHeader() const {
   CHECK(dynamic_program_header_ != NULL);
   return *dynamic_program_header_;
 }
 
-Elf32_Dyn* ElfFile::GetDynamicSectionStart() {
+Elf32_Dyn* ElfFile::GetDynamicSectionStart() const {
   CHECK(dynamic_section_start_ != NULL);
   return dynamic_section_start_;
 }
 
-Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) {
+Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) const {
   CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
   Elf32_Sym* symbol_section_start;
   switch (section_type) {
@@ -275,7 +354,7 @@
   return symbol_section_start;
 }
 
-const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) {
+const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) const {
   CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
   const char* string_section_start;
   switch (section_type) {
@@ -296,7 +375,7 @@
   return string_section_start;
 }
 
-const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) {
+const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) const {
   CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
   if (i == 0) {
     return NULL;
@@ -306,43 +385,43 @@
   return string;
 }
 
-Elf32_Word* ElfFile::GetHashSectionStart() {
+Elf32_Word* ElfFile::GetHashSectionStart() const {
   CHECK(hash_section_start_ != NULL);
   return hash_section_start_;
 }
 
-Elf32_Word ElfFile::GetHashBucketNum() {
+Elf32_Word ElfFile::GetHashBucketNum() const {
   return GetHashSectionStart()[0];
 }
 
-Elf32_Word ElfFile::GetHashChainNum() {
+Elf32_Word ElfFile::GetHashChainNum() const {
   return GetHashSectionStart()[1];
 }
 
-Elf32_Word ElfFile::GetHashBucket(size_t i) {
+Elf32_Word ElfFile::GetHashBucket(size_t i) const {
   CHECK_LT(i, GetHashBucketNum());
   // 0 is nbucket, 1 is nchain
   return GetHashSectionStart()[2 + i];
 }
 
-Elf32_Word ElfFile::GetHashChain(size_t i) {
+Elf32_Word ElfFile::GetHashChain(size_t i) const {
   CHECK_LT(i, GetHashChainNum());
   // 0 is nbucket, 1 is nchain, & chains are after buckets
   return GetHashSectionStart()[2 + GetHashBucketNum() + i];
 }
 
-Elf32_Word ElfFile::GetProgramHeaderNum() {
+Elf32_Word ElfFile::GetProgramHeaderNum() const {
   return GetHeader().e_phnum;
 }
 
-Elf32_Phdr& ElfFile::GetProgramHeader(Elf32_Word i) {
+Elf32_Phdr& ElfFile::GetProgramHeader(Elf32_Word i) const {
   CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath();
   byte* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
   CHECK_LT(program_header, End()) << file_->GetPath();
   return *reinterpret_cast<Elf32_Phdr*>(program_header);
 }
 
-Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) {
+Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) const {
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
     Elf32_Phdr& program_header = GetProgramHeader(i);
     if (program_header.p_type == type) {
@@ -352,11 +431,11 @@
   return NULL;
 }
 
-Elf32_Word ElfFile::GetSectionHeaderNum() {
+Elf32_Word ElfFile::GetSectionHeaderNum() const {
   return GetHeader().e_shnum;
 }
 
-Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) {
+Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) const {
   // Can only access arbitrary sections when we have the whole file, not just program header.
   // Even if we Load(), it doesn't bring in all the sections.
   CHECK(!program_header_only_) << file_->GetPath();
@@ -366,7 +445,7 @@
   return *reinterpret_cast<Elf32_Shdr*>(section_header);
 }
 
-Elf32_Shdr* ElfFile::FindSectionByType(Elf32_Word type) {
+Elf32_Shdr* ElfFile::FindSectionByType(Elf32_Word type) const {
   // Can only access arbitrary sections when we have the whole file, not just program header.
   // We could change this to switch on known types if they were detected during loading.
   CHECK(!program_header_only_) << file_->GetPath();
@@ -393,11 +472,11 @@
   return h;
 }
 
-Elf32_Shdr& ElfFile::GetSectionNameStringSection() {
+Elf32_Shdr& ElfFile::GetSectionNameStringSection() const {
   return GetSectionHeader(GetHeader().e_shstrndx);
 }
 
-byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) {
+const byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
   Elf32_Word hash = elfhash(symbol_name.c_str());
   Elf32_Word bucket_index = hash % GetHashBucketNum();
   Elf32_Word symbol_and_chain_index = GetHashBucket(bucket_index);
@@ -416,14 +495,15 @@
   return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
 }
 
-Elf32_Word ElfFile::GetSymbolNum(Elf32_Shdr& section_header) {
-  CHECK(IsSymbolSectionType(section_header.sh_type)) << file_->GetPath() << " " << section_header.sh_type;
+Elf32_Word ElfFile::GetSymbolNum(Elf32_Shdr& section_header) const {
+  CHECK(IsSymbolSectionType(section_header.sh_type))
+      << file_->GetPath() << " " << section_header.sh_type;
   CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
   return section_header.sh_size / section_header.sh_entsize;
 }
 
 Elf32_Sym& ElfFile::GetSymbol(Elf32_Word section_type,
-                                         Elf32_Word i) {
+                              Elf32_Word i) const {
   return *(GetSymbolSectionStart(section_type) + i);
 }
 
@@ -467,7 +547,8 @@
         if (name == NULL) {
           continue;
         }
-        std::pair<SymbolTable::iterator, bool> result = (*symbol_table)->insert(std::make_pair(name, &symbol));
+        std::pair<SymbolTable::iterator, bool> result =
+            (*symbol_table)->insert(std::make_pair(name, &symbol));
         if (!result.second) {
           // If a duplicate, make sure it has the same logical value. Seen on x86.
           CHECK_EQ(symbol.st_value, result.first->second->st_value);
@@ -504,8 +585,8 @@
 }
 
 Elf32_Addr ElfFile::FindSymbolAddress(Elf32_Word section_type,
-                                                 const std::string& symbol_name,
-                                                 bool build_map) {
+                                      const std::string& symbol_name,
+                                      bool build_map) {
   Elf32_Sym* symbol = FindSymbolByName(section_type, symbol_name, build_map);
   if (symbol == NULL) {
     return 0;
@@ -513,7 +594,7 @@
   return symbol->st_value;
 }
 
-const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) {
+const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) const {
   CHECK(!program_header_only_) << file_->GetPath();
   // TODO: remove this static_cast from enum when using -std=gnu++0x
   CHECK_EQ(static_cast<Elf32_Word>(SHT_STRTAB), string_section.sh_type) << file_->GetPath();
@@ -527,16 +608,16 @@
   return reinterpret_cast<const char*>(string);
 }
 
-Elf32_Word ElfFile::GetDynamicNum() {
+Elf32_Word ElfFile::GetDynamicNum() const {
   return GetDynamicProgramHeader().p_filesz / sizeof(Elf32_Dyn);
 }
 
-Elf32_Dyn& ElfFile::GetDynamic(Elf32_Word i) {
+Elf32_Dyn& ElfFile::GetDynamic(Elf32_Word i) const {
   CHECK_LT(i, GetDynamicNum()) << file_->GetPath();
   return *(GetDynamicSectionStart() + i);
 }
 
-Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) {
+Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) const {
   for (Elf32_Word i = 0; i < GetDynamicNum(); i++) {
     Elf32_Dyn& elf_dyn = GetDynamic(i);
     if (elf_dyn.d_tag == type) {
@@ -546,41 +627,41 @@
   return 0;
 }
 
-Elf32_Rel* ElfFile::GetRelSectionStart(Elf32_Shdr& section_header) {
+Elf32_Rel* ElfFile::GetRelSectionStart(Elf32_Shdr& section_header) const {
   CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
   return reinterpret_cast<Elf32_Rel*>(Begin() + section_header.sh_offset);
 }
 
-Elf32_Word ElfFile::GetRelNum(Elf32_Shdr& section_header) {
+Elf32_Word ElfFile::GetRelNum(Elf32_Shdr& section_header) const {
   CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
   CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
   return section_header.sh_size / section_header.sh_entsize;
 }
 
-Elf32_Rel& ElfFile::GetRel(Elf32_Shdr& section_header, Elf32_Word i) {
+Elf32_Rel& ElfFile::GetRel(Elf32_Shdr& section_header, Elf32_Word i) const {
   CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
   CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath();
   return *(GetRelSectionStart(section_header) + i);
 }
 
-Elf32_Rela* ElfFile::GetRelaSectionStart(Elf32_Shdr& section_header) {
+Elf32_Rela* ElfFile::GetRelaSectionStart(Elf32_Shdr& section_header) const {
   CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
   return reinterpret_cast<Elf32_Rela*>(Begin() + section_header.sh_offset);
 }
 
-Elf32_Word ElfFile::GetRelaNum(Elf32_Shdr& section_header) {
+Elf32_Word ElfFile::GetRelaNum(Elf32_Shdr& section_header) const {
   CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
   return section_header.sh_size / section_header.sh_entsize;
 }
 
-Elf32_Rela& ElfFile::GetRela(Elf32_Shdr& section_header, Elf32_Word i) {
+Elf32_Rela& ElfFile::GetRela(Elf32_Shdr& section_header, Elf32_Word i) const {
   CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
   CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath();
   return *(GetRelaSectionStart(section_header) + i);
 }
 
 // Base on bionic phdr_table_get_load_size
-size_t ElfFile::GetLoadedSize() {
+size_t ElfFile::GetLoadedSize() const {
   Elf32_Addr min_vaddr = 0xFFFFFFFFu;
   Elf32_Addr max_vaddr = 0x00000000u;
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
@@ -605,7 +686,6 @@
 }
 
 bool ElfFile::Load(bool executable, std::string* error_msg) {
-  // TODO: actually return false error
   CHECK(program_header_only_) << file_->GetPath();
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
     Elf32_Phdr& program_header = GetProgramHeader(i);
@@ -643,11 +723,14 @@
     if (program_header.p_vaddr == 0) {
       std::string reservation_name("ElfFile reservation for ");
       reservation_name += file_->GetPath();
-      std::string error_msg;
       UniquePtr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
                                                      NULL, GetLoadedSize(), PROT_NONE, false,
-                                                     &error_msg));
-      CHECK(reserve.get() != NULL) << file_->GetPath() << ": " << error_msg;
+                                                     error_msg));
+      if (reserve.get() == nullptr) {
+        *error_msg = StringPrintf("Failed to allocate %s: %s",
+                                  reservation_name.c_str(), error_msg->c_str());
+        return false;
+      }
       base_address_ = reserve->Begin();
       segments_.push_back(reserve.release());
     }
@@ -687,8 +770,17 @@
                                                        true,
                                                        file_->GetPath().c_str(),
                                                        error_msg));
-    CHECK(segment.get() != nullptr) << *error_msg;
-    CHECK_EQ(segment->Begin(), p_vaddr) << file_->GetPath();
+    if (segment.get() == nullptr) {
+      *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
+                                i, file_->GetPath().c_str(), error_msg->c_str());
+      return false;
+    }
+    if (segment->Begin() != p_vaddr) {
+      *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
+                                "instead mapped to %p",
+                                i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+      return false;
+    }
     segments_.push_back(segment.release());
   }
 
@@ -700,19 +792,39 @@
     byte* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
     switch (elf_dyn.d_tag) {
       case DT_HASH: {
+        if (!ValidPointer(d_ptr)) {
+          *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
+                                    d_ptr, file_->GetPath().c_str());
+          return false;
+        }
         hash_section_start_ = reinterpret_cast<Elf32_Word*>(d_ptr);
         break;
       }
       case DT_STRTAB: {
+        if (!ValidPointer(d_ptr)) {
+          *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
+                                    d_ptr, file_->GetPath().c_str());
+          return false;
+        }
         dynstr_section_start_ = reinterpret_cast<char*>(d_ptr);
         break;
       }
       case DT_SYMTAB: {
+        if (!ValidPointer(d_ptr)) {
+          *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
+                                    d_ptr, file_->GetPath().c_str());
+          return false;
+        }
         dynsym_section_start_ = reinterpret_cast<Elf32_Sym*>(d_ptr);
         break;
       }
       case DT_NULL: {
-        CHECK_EQ(GetDynamicNum(), i+1);
+        if (GetDynamicNum() != i+1) {
+          *error_msg = StringPrintf("DT_NULL found after %d .dynamic entries, "
+                                    "expected %d as implied by size of PT_DYNAMIC segment in %s",
+                                    i + 1, GetDynamicNum(), file_->GetPath().c_str());
+          return false;
+        }
         break;
       }
     }
@@ -721,4 +833,14 @@
   return true;
 }
 
+bool ElfFile::ValidPointer(const byte* start) const {
+  for (size_t i = 0; i < segments_.size(); ++i) {
+    const MemMap* segment = segments_[i];
+    if (segment->Begin() <= start && start < segment->End()) {
+      return true;
+    }
+  }
+  return false;
+}
+
 }  // namespace art
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index baf4356..8a0a5f8 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -39,15 +39,15 @@
 
   // Load segments into memory based on PT_LOAD program headers
 
-  File& GetFile() const {
+  const File& GetFile() const {
     return *file_;
   }
 
-  byte* Begin() {
+  byte* Begin() const {
     return map_->Begin();
   }
 
-  byte* End() {
+  byte* End() const {
     return map_->End();
   }
 
@@ -55,24 +55,24 @@
     return map_->Size();
   }
 
-  Elf32_Ehdr& GetHeader();
+  Elf32_Ehdr& GetHeader() const;
 
-  Elf32_Word GetProgramHeaderNum();
-  Elf32_Phdr& GetProgramHeader(Elf32_Word);
-  Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type);
+  Elf32_Word GetProgramHeaderNum() const;
+  Elf32_Phdr& GetProgramHeader(Elf32_Word) const;
+  Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type) const;
 
-  Elf32_Word GetSectionHeaderNum();
-  Elf32_Shdr& GetSectionHeader(Elf32_Word);
-  Elf32_Shdr* FindSectionByType(Elf32_Word type);
+  Elf32_Word GetSectionHeaderNum() const;
+  Elf32_Shdr& GetSectionHeader(Elf32_Word) const;
+  Elf32_Shdr* FindSectionByType(Elf32_Word type) const;
 
-  Elf32_Shdr& GetSectionNameStringSection();
+  Elf32_Shdr& GetSectionNameStringSection() const;
 
   // Find .dynsym using .hash for more efficient lookup than FindSymbolAddress.
-  byte* FindDynamicSymbolAddress(const std::string& symbol_name);
+  const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
 
   static bool IsSymbolSectionType(Elf32_Word section_type);
-  Elf32_Word GetSymbolNum(Elf32_Shdr&);
-  Elf32_Sym& GetSymbol(Elf32_Word section_type, Elf32_Word i);
+  Elf32_Word GetSymbolNum(Elf32_Shdr&) const;
+  Elf32_Sym& GetSymbol(Elf32_Word section_type, Elf32_Word i) const;
 
   // Find symbol in specified table, returning NULL if it is not found.
   //
@@ -83,73 +83,77 @@
   // should be set unless only a small number of symbols will be
   // looked up.
   Elf32_Sym* FindSymbolByName(Elf32_Word section_type,
-                                           const std::string& symbol_name,
-                                           bool build_map);
+                              const std::string& symbol_name,
+                              bool build_map);
 
   // Find address of symbol in specified table, returning 0 if it is
   // not found. See FindSymbolByName for an explanation of build_map.
   Elf32_Addr FindSymbolAddress(Elf32_Word section_type,
-                                            const std::string& symbol_name,
-                                            bool build_map);
+                               const std::string& symbol_name,
+                               bool build_map);
 
   // Lookup a string given string section and offset. Returns NULL for
   // special 0 offset.
-  const char* GetString(Elf32_Shdr&, Elf32_Word);
+  const char* GetString(Elf32_Shdr&, Elf32_Word) const;
 
   // Lookup a string by section type. Returns NULL for special 0 offset.
-  const char* GetString(Elf32_Word section_type, Elf32_Word);
+  const char* GetString(Elf32_Word section_type, Elf32_Word) const;
 
-  Elf32_Word GetDynamicNum();
-  Elf32_Dyn& GetDynamic(Elf32_Word);
-  Elf32_Word FindDynamicValueByType(Elf32_Sword type);
+  Elf32_Word GetDynamicNum() const;
+  Elf32_Dyn& GetDynamic(Elf32_Word) const;
+  Elf32_Word FindDynamicValueByType(Elf32_Sword type) const;
 
-  Elf32_Word GetRelNum(Elf32_Shdr&);
-  Elf32_Rel& GetRel(Elf32_Shdr&, Elf32_Word);
+  Elf32_Word GetRelNum(Elf32_Shdr&) const;
+  Elf32_Rel& GetRel(Elf32_Shdr&, Elf32_Word) const;
 
-  Elf32_Word GetRelaNum(Elf32_Shdr&);
-  Elf32_Rela& GetRela(Elf32_Shdr&, Elf32_Word);
+  Elf32_Word GetRelaNum(Elf32_Shdr&) const;
+  Elf32_Rela& GetRela(Elf32_Shdr&, Elf32_Word) const;
 
   // Returns the expected size when the file is loaded at runtime
-  size_t GetLoadedSize();
+  size_t GetLoadedSize() const;
 
   // Load segments into memory based on PT_LOAD program headers.
   // executable is true at run time, false at compile time.
   bool Load(bool executable, std::string* error_msg);
 
  private:
-  ElfFile();
+  ElfFile(File* file, bool writable, bool program_header_only);
 
-  bool Setup(File* file, bool writable, bool program_header_only, std::string* error_msg);
+  bool Setup(std::string* error_msg);
 
   bool SetMap(MemMap* map, std::string* error_msg);
 
-  byte* GetProgramHeadersStart();
-  byte* GetSectionHeadersStart();
-  Elf32_Phdr& GetDynamicProgramHeader();
-  Elf32_Dyn* GetDynamicSectionStart();
-  Elf32_Sym* GetSymbolSectionStart(Elf32_Word section_type);
-  const char* GetStringSectionStart(Elf32_Word section_type);
-  Elf32_Rel* GetRelSectionStart(Elf32_Shdr&);
-  Elf32_Rela* GetRelaSectionStart(Elf32_Shdr&);
-  Elf32_Word* GetHashSectionStart();
-  Elf32_Word GetHashBucketNum();
-  Elf32_Word GetHashChainNum();
-  Elf32_Word GetHashBucket(size_t i);
-  Elf32_Word GetHashChain(size_t i);
+  byte* GetProgramHeadersStart() const;
+  byte* GetSectionHeadersStart() const;
+  Elf32_Phdr& GetDynamicProgramHeader() const;
+  Elf32_Dyn* GetDynamicSectionStart() const;
+  Elf32_Sym* GetSymbolSectionStart(Elf32_Word section_type) const;
+  const char* GetStringSectionStart(Elf32_Word section_type) const;
+  Elf32_Rel* GetRelSectionStart(Elf32_Shdr&) const;
+  Elf32_Rela* GetRelaSectionStart(Elf32_Shdr&) const;
+  Elf32_Word* GetHashSectionStart() const;
+  Elf32_Word GetHashBucketNum() const;
+  Elf32_Word GetHashChainNum() const;
+  Elf32_Word GetHashBucket(size_t i) const;
+  Elf32_Word GetHashChain(size_t i) const;
 
   typedef std::map<std::string, Elf32_Sym*> SymbolTable;
   SymbolTable** GetSymbolTable(Elf32_Word section_type);
 
-  File* file_;
-  bool writable_;
-  bool program_header_only_;
+  bool ValidPointer(const byte* start) const;
 
-  // ELF header mapping. If program_header_only_ is false, will actually point to the entire elf file.
+  const File* const file_;
+  const bool writable_;
+  const bool program_header_only_;
+
+  // ELF header mapping. If program_header_only_ is false, will
+  // actually point to the entire elf file.
   UniquePtr<MemMap> map_;
   Elf32_Ehdr* header_;
   std::vector<MemMap*> segments_;
 
-  // Pointer to start of first PT_LOAD program segment after Load() when program_header_only_ is true.
+  // Pointer to start of first PT_LOAD program segment after Load()
+  // when program_header_only_ is true.
   byte* base_address_;
 
   // The program header should always available but use GetProgramHeadersStart() to be sure.
@@ -161,8 +165,8 @@
   Elf32_Dyn* dynamic_section_start_;
   Elf32_Sym* symtab_section_start_;
   Elf32_Sym* dynsym_section_start_;
-  const char* strtab_section_start_;
-  const char* dynstr_section_start_;
+  char* strtab_section_start_;
+  char* dynstr_section_start_;
   Elf32_Word* hash_section_start_;
 
   SymbolTable* symtab_symbol_table_;
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 4078cac..829ec4a 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -87,7 +87,8 @@
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // Use the current allocator type in case CheckFilledNewArrayAlloc caused us to suspend and then
   // the heap switched the allocator type while we were suspended.
-  return mirror::Array::Alloc<false>(self, klass, component_count, heap->GetCurrentAllocator());
+  return mirror::Array::Alloc<false>(self, klass, component_count, klass->GetComponentSize(),
+                                     heap->GetCurrentAllocator());
 }
 
 // Helper function to allocate array for FILLED_NEW_ARRAY.
@@ -103,7 +104,8 @@
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // Use the current allocator type in case CheckFilledNewArrayAlloc caused us to suspend and then
   // the heap switched the allocator type while we were suspended.
-  return mirror::Array::Alloc<true>(self, klass, component_count, heap->GetCurrentAllocator());
+  return mirror::Array::Alloc<true>(self, klass, component_count, klass->GetComponentSize(),
+                                    heap->GetCurrentAllocator());
 }
 
 void ThrowStackOverflowError(Thread* self) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 2c08351..2ced942 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -228,9 +228,11 @@
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
     return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
+                                               klass->GetComponentSize(),
                                                heap->GetCurrentAllocator());
   }
-  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type);
+  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
+                                             klass->GetComponentSize(), allocator_type);
 }
 
 template <bool kAccessCheck, bool kInstrumented>
@@ -252,9 +254,10 @@
       return nullptr;  // Failure
     }
   }
-  // No need to retry a slow-path allocation as the above code won't
-  // cause a GC or thread suspension.
-  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type);
+  // No need to retry a slow-path allocation as the above code won't cause a GC or thread
+  // suspension.
+  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
+                                             klass->GetComponentSize(), allocator_type);
 }
 
 extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
diff --git a/runtime/entrypoints/math_entrypoints_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
index ca8b931..b69aeb4 100644
--- a/runtime/entrypoints/math_entrypoints_test.cc
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -16,12 +16,13 @@
 
 #include "math_entrypoints.h"
 
-#include "common_test.h"
 #include <limits>
 
+#include "common_runtime_test.h"
+
 namespace art {
 
-class MathEntrypointsTest : public CommonTest {};
+class MathEntrypointsTest : public CommonRuntimeTest {};
 
 TEST_F(MathEntrypointsTest, DoubleToLong) {
   EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 910a817..3653b37 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -15,10 +15,10 @@
  */
 
 #include "class_linker.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
 #include "gtest/gtest.h"
-#include "leb128_encoder.h"
+#include "leb128.h"
 #include "mirror/class-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
@@ -32,10 +32,10 @@
 
 namespace art {
 
-class ExceptionTest : public CommonTest {
+class ExceptionTest : public CommonRuntimeTest {
  protected:
   virtual void SetUp() {
-    CommonTest::SetUp();
+    CommonRuntimeTest::SetUp();
 
     ScopedObjectAccess soa(Thread::Current());
     SirtRef<mirror::ClassLoader> class_loader(
@@ -77,7 +77,7 @@
     method_f_ = my_klass_->FindVirtualMethod("f", "()I");
     ASSERT_TRUE(method_f_ != NULL);
     method_f_->SetFrameSizeInBytes(kStackAlignment);
-    method_f_->SetEntryPointFromQuickCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2));
+    method_f_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
     method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
     method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
     method_f_->SetNativeGcMap(&fake_gc_map_[0]);
@@ -85,7 +85,7 @@
     method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
     ASSERT_TRUE(method_g_ != NULL);
     method_g_->SetFrameSizeInBytes(kStackAlignment);
-    method_g_->SetEntryPointFromQuickCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2));
+    method_g_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
     method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
     method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
     method_g_->SetNativeGcMap(&fake_gc_map_[0]);
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index e70704f..ba4e2ac 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -16,20 +16,18 @@
 
 #include "space_bitmap.h"
 
-#include "common_test.h"
+#include <stdint.h>
+
+#include "common_runtime_test.h"
 #include "globals.h"
 #include "space_bitmap-inl.h"
 #include "UniquePtr.h"
 
-#include <stdint.h>
-
 namespace art {
 namespace gc {
 namespace accounting {
 
-class SpaceBitmapTest : public CommonTest {
- public:
-};
+class SpaceBitmapTest : public CommonRuntimeTest {};
 
 TEST_F(SpaceBitmapTest, Init) {
   byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index c4238c7..5b4ca80 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -54,10 +54,10 @@
 namespace gc {
 namespace allocator {
 
-// A Runs-of-slots memory allocator.
+// A runs-of-slots memory allocator.
 class RosAlloc {
  private:
-  // Rerepresents a run of free pages.
+  // Represents a run of free pages.
   class FreePageRun {
    public:
     byte magic_num_;  // The magic number used for debugging only.
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8ca3892..7b2bc3b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -450,6 +450,12 @@
 
 inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
   DCHECK(!IsImmune(obj));
+
+  if (kUseBrooksPointer) {
+    // Verify all the objects have the correct Brooks pointer installed.
+    obj->AssertSelfBrooksPointer();
+  }
+
   // Try to take advantage of locality of references within a space, failing this find the space
   // the hard way.
   accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
@@ -470,6 +476,11 @@
 inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
   DCHECK(obj != NULL);
 
+  if (kUseBrooksPointer) {
+    // Verify all the objects have the correct Brooks pointer installed.
+    obj->AssertSelfBrooksPointer();
+  }
+
   if (IsImmune(obj)) {
     DCHECK(IsMarked(obj));
     return;
@@ -532,6 +543,11 @@
 inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
   DCHECK(obj != NULL);
 
+  if (kUseBrooksPointer) {
+    // Verify all the objects have the correct Brooks pointer installed.
+    obj->AssertSelfBrooksPointer();
+  }
+
   if (IsImmune(obj)) {
     DCHECK(IsMarked(obj));
     return false;
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 29fafd6..c55b2b2 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -64,16 +64,18 @@
 
   ~MarkSweep() {}
 
-  virtual void InitializePhase();
-  virtual bool IsConcurrent() const;
-  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void InitializePhase() OVERRIDE;
+  virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual bool HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void MarkReachableObjects()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const {
+
+  virtual bool IsConcurrent() const OVERRIDE;
+
+  virtual GcType GetGcType() const OVERRIDE {
     return kGcTypeFull;
   }
 
@@ -131,7 +133,7 @@
   void ProcessReferences(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Update and mark references from immune spaces.
+  // Update and mark references from immune spaces. Virtual as overridden by StickyMarkSweep.
   virtual void UpdateAndMarkModUnion()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -140,7 +142,8 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Sweeps unmarked objects to complete the garbage collection.
+  // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
+  // all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
   virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
@@ -232,7 +235,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Mark the vm thread roots.
-  virtual void MarkThreadRoots(Thread* self)
+  void MarkThreadRoots(Thread* self)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 3b788f4..44ae9e9 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,8 @@
 
 class PartialMarkSweep : public MarkSweep {
  public:
-  virtual GcType GetGcType() const {
+  // Virtual as overridden by StickyMarkSweep.
+  virtual GcType GetGcType() const OVERRIDE {
     return kGcTypePartial;
   }
 
@@ -35,8 +36,9 @@
 
  protected:
   // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
-  // collections, ie the Zygote space. Also mark this space is immune.
-  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // collections, ie the Zygote space. Also mark this space is immune. Virtual as overridden by
+  // StickyMarkSweep.
+  virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index fe8c253..a4c9dea 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -133,13 +133,15 @@
       immune_end_(nullptr),
       is_large_object_space_immune_(false),
       to_space_(nullptr),
+      to_space_live_bitmap_(nullptr),
       from_space_(nullptr),
       self_(nullptr),
       generational_(generational),
       last_gc_to_space_end_(nullptr),
       bytes_promoted_(0),
       whole_heap_collection_(true),
-      whole_heap_collection_interval_counter_(0) {
+      whole_heap_collection_interval_counter_(0),
+      saved_bytes_(0) {
 }
 
 void SemiSpace::InitializePhase() {
@@ -263,7 +265,7 @@
     semi_space_->ScanObject(obj);
   }
  private:
-  SemiSpace* semi_space_;
+  SemiSpace* const semi_space_;
 };
 
 void SemiSpace::MarkReachableObjects() {
@@ -467,10 +469,10 @@
     // of an old generation.)
     size_t bytes_promoted;
     space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
-    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
+    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
     if (forward_address == nullptr) {
       // If out of space, fall back to the to-space.
-      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
+      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
     } else {
       GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
       bytes_promoted_ += bytes_promoted;
@@ -511,12 +513,18 @@
     DCHECK(forward_address != nullptr);
   } else {
     // If it's allocated after the last GC (younger), copy it to the to-space.
-    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
+    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
   }
   // Copy over the object and add it to the mark stack since we still need to update its
   // references.
   saved_bytes_ +=
       CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
+  if (kUseBrooksPointer) {
+    obj->AssertSelfBrooksPointer();
+    DCHECK_EQ(forward_address->GetBrooksPointer(), obj);
+    forward_address->SetBrooksPointer(forward_address);
+    forward_address->AssertSelfBrooksPointer();
+  }
   if (to_space_live_bitmap_ != nullptr) {
     to_space_live_bitmap_->Set(forward_address);
   }
@@ -529,6 +537,12 @@
 // the to-space and have their forward address updated. Objects which have been newly marked are
 // pushed on the mark stack.
 Object* SemiSpace::MarkObject(Object* obj) {
+  if (kUseBrooksPointer) {
+    // Verify all the objects have the correct forward pointer installed.
+    if (obj != nullptr) {
+      obj->AssertSelfBrooksPointer();
+    }
+  }
   Object* forward_address = obj;
   if (obj != nullptr && !IsImmune(obj)) {
     if (from_space_->HasAddress(obj)) {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index ba97376..c164c5f 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -275,7 +275,7 @@
   // When true, the generational mode (promotion and the bump pointer
   // space only collection) is enabled. TODO: move these to a new file
   // as a new garbage collector?
-  bool generational_;
+  const bool generational_;
 
   // Used for the generational mode. the end/top of the bump
   // pointer space at the end of the last collection.
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 9e3adb4..ce51ac5 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -59,11 +59,6 @@
   SweepArray(GetHeap()->GetLiveStack(), false);
 }
 
-void StickyMarkSweep::MarkThreadRoots(Thread* self) {
-  MarkRootsCheckpoint(self);
-}
-
-
 }  // namespace collector
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index b675877..98f2b59 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -25,9 +25,9 @@
 namespace gc {
 namespace collector {
 
-class StickyMarkSweep : public PartialMarkSweep {
+class StickyMarkSweep FINAL : public PartialMarkSweep {
  public:
-  GcType GetGcType() const {
+  GcType GetGcType() const OVERRIDE {
     return kGcTypeSticky;
   }
 
@@ -37,21 +37,17 @@
  protected:
   // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
   // alloc space will be marked as immune.
-  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void MarkReachableObjects()
+  void MarkReachableObjects() OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  virtual void MarkThreadRoots(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void Sweep(bool swap_bitmaps) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Don't need to do anything special here since we scan all the cards which may have references
   // to the newly allocated objects.
-  virtual void UpdateAndMarkModUnion() { }
+  void UpdateAndMarkModUnion() OVERRIDE { }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3d591f0..e089ef2 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -50,11 +50,13 @@
   }
   mirror::Object* obj;
   AllocationTimer alloc_timer(this, &obj);
-  size_t bytes_allocated;
-  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated);
+  size_t bytes_allocated, usable_size;
+  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
+                                            &usable_size);
   if (UNLIKELY(obj == nullptr)) {
     bool is_current_allocator = allocator == GetCurrentAllocator();
-    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &klass);
+    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
+                                 &klass);
     if (obj == nullptr) {
       bool after_is_current_allocator = allocator == GetCurrentAllocator();
       if (is_current_allocator && !after_is_current_allocator) {
@@ -64,9 +66,17 @@
       return nullptr;
     }
   }
-  obj->SetClass(klass);
-  pre_fence_visitor(obj);
   DCHECK_GT(bytes_allocated, 0u);
+  DCHECK_GT(usable_size, 0u);
+  obj->SetClass(klass);
+  if (kUseBrooksPointer) {
+    obj->SetBrooksPointer(obj);
+    obj->AssertSelfBrooksPointer();
+  }
+  pre_fence_visitor(obj, usable_size);
+  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
+    CHECK_LE(obj->SizeOf(), usable_size);
+  }
   const size_t new_num_bytes_allocated =
       static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
   // TODO: Deprecate.
@@ -144,7 +154,8 @@
 
 template <const bool kInstrumented, const bool kGrow>
 inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
-                                           size_t alloc_size, size_t* bytes_allocated) {
+                                           size_t alloc_size, size_t* bytes_allocated,
+                                           size_t* usable_size) {
   if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
     return nullptr;
   }
@@ -156,35 +167,36 @@
       ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
       if (LIKELY(ret != nullptr)) {
         *bytes_allocated = alloc_size;
+        *usable_size = alloc_size;
       }
       break;
     }
     case kAllocatorTypeRosAlloc: {
       if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
         // If running on valgrind, we should be using the instrumented path.
-        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated);
+        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
       } else {
         DCHECK(!running_on_valgrind_);
-        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated);
+        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
       }
       break;
     }
     case kAllocatorTypeDlMalloc: {
       if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
         // If running on valgrind, we should be using the instrumented path.
-        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated);
+        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
       } else {
         DCHECK(!running_on_valgrind_);
-        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated);
+        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
       }
       break;
     }
     case kAllocatorTypeNonMoving: {
-      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
+      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
       break;
     }
     case kAllocatorTypeLOS: {
-      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
+      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
       // Note that the bump pointer spaces aren't necessarily next to
       // the other continuous spaces like the non-moving alloc space or
       // the zygote space.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 58db7a8..8d8cdd6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -952,6 +952,7 @@
 
 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                              size_t alloc_size, size_t* bytes_allocated,
+                                             size_t* usable_size,
                                              mirror::Class** klass) {
   mirror::Object* ptr = nullptr;
   bool was_default_allocator = allocator == GetCurrentAllocator();
@@ -968,7 +969,7 @@
       return nullptr;
     }
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
-    ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
+    ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
   }
 
   // Loop through our different Gc types and try to Gc until we get enough free memory.
@@ -985,13 +986,13 @@
     }
     if (gc_ran) {
       // Did we free sufficient memory for the allocation to succeed?
-      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
+      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
     }
   }
   // Allocations have failed after GCs;  this is an exceptional state.
   if (ptr == nullptr) {
     // Try harder, growing the heap if necessary.
-    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
+    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
   }
   if (ptr == nullptr) {
     // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
@@ -1008,7 +1009,7 @@
       *klass = sirt_klass.get();
       return nullptr;
     }
-    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
+    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
     if (ptr == nullptr) {
       ThrowOutOfMemoryError(self, alloc_size, false);
     }
@@ -1318,9 +1319,10 @@
 }
 
 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector : public collector::SemiSpace {
+class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
  public:
-  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector") {
+  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector"),
+      bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
   }
 
   void BuildBins(space::ContinuousSpace* space) {
@@ -1382,7 +1384,7 @@
       // No available space in the bins, place it in the target space instead (grows the zygote
       // space).
       size_t bytes_allocated;
-      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
+      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
       if (to_space_live_bitmap_ != nullptr) {
         to_space_live_bitmap_->Set(forward_address);
       } else {
@@ -1402,6 +1404,12 @@
     }
     // Copy the object over to its new location.
     memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
+    if (kUseBrooksPointer) {
+      obj->AssertSelfBrooksPointer();
+      DCHECK_EQ(forward_address->GetBrooksPointer(), obj);
+      forward_address->SetBrooksPointer(forward_address);
+      forward_address->AssertSelfBrooksPointer();
+    }
     return forward_address;
   }
 };
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5d44ee1..5d3232f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -151,18 +151,24 @@
   ~Heap();
 
   // Allocates and initializes storage for an object instance.
-  template <bool kInstrumented>
-  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+  template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
+                              const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
-                                                         GetCurrentAllocator());
+                                                         GetCurrentAllocator(),
+                                                         pre_fence_visitor);
   }
-  template <bool kInstrumented>
-  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+
+  template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
+                                        const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
-                                                         GetCurrentNonMovingAllocator());
+                                                         GetCurrentNonMovingAllocator(),
+                                                         pre_fence_visitor);
   }
+
   template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
       Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
@@ -570,7 +576,8 @@
   // Handles Allocate()'s slow allocation path with GC involved after
   // an initial allocation attempt failed.
   mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
-                                         size_t* bytes_allocated, mirror::Class** klass)
+                                         size_t* bytes_allocated, size_t* usable_size,
+                                         mirror::Class** klass)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -583,7 +590,8 @@
   // that the switch statement is constant optimized in the entrypoints.
   template <const bool kInstrumented, const bool kGrow>
   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
-                                              size_t alloc_size, size_t* bytes_allocated)
+                                              size_t alloc_size, size_t* bytes_allocated,
+                                              size_t* usable_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation)
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 4b86339..07e5088 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "mirror/class-inl.h"
@@ -25,7 +25,7 @@
 namespace art {
 namespace gc {
 
-class HeapTest : public CommonTest {};
+class HeapTest : public CommonRuntimeTest {};
 
 TEST_F(HeapTest, ClearGrowthLimit) {
   Heap* heap = Runtime::Current()->GetHeap();
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 74a0274..70ab64b 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -23,6 +23,19 @@
 namespace gc {
 namespace space {
 
+inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
+                                               size_t* usable_size) {
+  num_bytes = RoundUp(num_bytes, kAlignment);
+  mirror::Object* ret = AllocNonvirtual(num_bytes);
+  if (LIKELY(ret != nullptr)) {
+    *bytes_allocated = num_bytes;
+    if (usable_size != nullptr) {
+      *usable_size = num_bytes;
+    }
+  }
+  return ret;
+}
+
 inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
   DCHECK(IsAligned<kAlignment>(num_bytes));
   byte* old_end;
@@ -49,6 +62,15 @@
   return ret;
 }
 
+inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t num_bytes = obj->SizeOf();
+  if (usable_size != nullptr) {
+    *usable_size = RoundUp(num_bytes, kAlignment);
+  }
+  return num_bytes;
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index f3f594f..43674ea 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -58,19 +58,6 @@
       num_blocks_(0) {
 }
 
-mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated) {
-  num_bytes = RoundUp(num_bytes, kAlignment);
-  mirror::Object* ret = AllocNonvirtual(num_bytes);
-  if (LIKELY(ret != nullptr)) {
-    *bytes_allocated = num_bytes;
-  }
-  return ret;
-}
-
-size_t BumpPointerSpace::AllocationSize(mirror::Object* obj) {
-  return AllocationSizeNonvirtual(obj);
-}
-
 void BumpPointerSpace::Clear() {
   // Release the pages back to the operating system.
   CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
@@ -185,8 +172,9 @@
   }
 }
 
-bool BumpPointerSpace::IsEmpty() const {
-  return Begin() == End();
+accounting::SpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
+  LOG(FATAL) << "Unimplemented";
+  return nullptr;
 }
 
 uint64_t BumpPointerSpace::GetBytesAllocated() {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index d7e6f5b..476b833 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -29,12 +29,13 @@
 
 namespace space {
 
-// A bump pointer space is a space where objects may be allocated and garbage collected.
-class BumpPointerSpace : public ContinuousMemMapAllocSpace {
+// A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
+// implementation as it's intended to be evacuated.
+class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
  public:
   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
 
-  SpaceType GetType() const {
+  SpaceType GetType() const OVERRIDE {
     return kSpaceTypeBumpPointerSpace;
   }
 
@@ -44,26 +45,29 @@
   static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
 
   // Allocate num_bytes, returns nullptr if the space is full.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE;
   mirror::Object* AllocNonvirtual(size_t num_bytes);
   mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return AllocationSizeNonvirtual(obj, usable_size);
+  }
 
   // NOPS unless we support free lists.
-  virtual size_t Free(Thread*, mirror::Object*) {
-    return 0;
-  }
-  virtual size_t FreeList(Thread*, size_t, mirror::Object**) {
+  size_t Free(Thread*, mirror::Object*) OVERRIDE {
     return 0;
   }
 
-  size_t AllocationSizeNonvirtual(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return obj->SizeOf();
+  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+    return 0;
   }
 
+  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
   // maximum reserved size of the heap.
   void ClearGrowthLimit() {
@@ -80,16 +84,16 @@
     return GetMemMap()->Size();
   }
 
-  accounting::SpaceBitmap* GetLiveBitmap() const {
+  accounting::SpaceBitmap* GetLiveBitmap() const OVERRIDE {
     return nullptr;
   }
 
-  accounting::SpaceBitmap* GetMarkBitmap() const {
+  accounting::SpaceBitmap* GetMarkBitmap() const OVERRIDE {
     return nullptr;
   }
 
   // Clear the memory and reset the pointer to the start of the space.
-  void Clear() LOCKS_EXCLUDED(block_lock_);
+  void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
 
   void Dump(std::ostream& os) const;
 
@@ -99,7 +103,10 @@
 
   uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsEmpty() const;
+  bool IsEmpty() const {
+    return Begin() == End();
+  }
+
 
   bool Contains(const mirror::Object* obj) const {
     const byte* byte_obj = reinterpret_cast<const byte*>(obj);
@@ -116,7 +123,7 @@
   // Allocate a new TLAB, returns false if the allocation failed.
   bool AllocNewTlab(Thread* self, size_t bytes);
 
-  virtual BumpPointerSpace* AsBumpPointerSpace() {
+  BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
     return this;
   }
 
@@ -124,6 +131,8 @@
   void Walk(ObjectCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  accounting::SpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+
   // Object alignment within the space.
   static constexpr size_t kAlignment = 8;
 
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index c14a4e1..02d8b54 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_INL_H_
 
 #include "dlmalloc_space.h"
+#include "gc/allocator/dlmalloc.h"
 #include "thread.h"
 
 namespace art {
@@ -25,11 +26,12 @@
 namespace space {
 
 inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes,
-                                                      size_t* bytes_allocated) {
+                                                      size_t* bytes_allocated,
+                                                      size_t* usable_size) {
   mirror::Object* obj;
   {
     MutexLock mu(self, lock_);
-    obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
+    obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
   }
   if (LIKELY(obj != NULL)) {
     // Zero freshly allocated memory, done while not holding the space's lock.
@@ -38,15 +40,25 @@
   return obj;
 }
 
+inline size_t DlMallocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
+  void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
+  size_t size = mspace_usable_size(obj_ptr);
+  if (usable_size != nullptr) {
+    *usable_size = size;
+  }
+  return size + kChunkOverhead;
+}
+
 inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/, size_t num_bytes,
-                                                               size_t* bytes_allocated) {
+                                                               size_t* bytes_allocated,
+                                                               size_t* usable_size) {
   mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_for_alloc_, num_bytes));
   if (LIKELY(result != NULL)) {
     if (kDebugSpaces) {
       CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
             << ") not in bounds of allocation space " << *this;
     }
-    size_t allocation_size = AllocationSizeNonvirtual(result);
+    size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
     DCHECK(bytes_allocated != NULL);
     *bytes_allocated = allocation_size;
   }
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 1493019..caedaaf 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -25,15 +25,15 @@
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-
-#include <valgrind.h>
-#include <memcheck/memcheck.h>
+#include "valgrind_malloc_space-inl.h"
 
 namespace art {
 namespace gc {
 namespace space {
 
-static const bool kPrefetchDuringDlMallocFreeList = true;
+static constexpr bool kPrefetchDuringDlMallocFreeList = true;
+
+template class ValgrindMallocSpace<DlMallocSpace, void*>;
 
 DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                              byte* end, byte* limit, size_t growth_limit)
@@ -119,11 +119,8 @@
   return msp;
 }
 
-mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
-  return AllocNonvirtual(self, num_bytes, bytes_allocated);
-}
-
-mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
+                                               size_t* bytes_allocated, size_t* usable_size) {
   mirror::Object* result;
   {
     MutexLock mu(self, lock_);
@@ -131,7 +128,7 @@
     size_t max_allowed = Capacity();
     mspace_set_footprint_limit(mspace_, max_allowed);
     // Try the allocation.
-    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
+    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
     // Shrink back down as small as possible.
     size_t footprint = mspace_footprint(mspace_);
     mspace_set_footprint_limit(mspace_, footprint);
@@ -145,7 +142,8 @@
   return result;
 }
 
-MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator, byte* begin, byte* end,
+MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
+                                           void* allocator, byte* begin, byte* end,
                                            byte* limit, size_t growth_limit) {
   return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit);
 }
@@ -156,7 +154,7 @@
     CHECK(ptr != NULL);
     CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
   }
-  const size_t bytes_freed = AllocationSizeNonvirtual(ptr);
+  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
   if (kRecentFreeCount > 0) {
     RegisterRecentFree(ptr);
   }
@@ -176,7 +174,7 @@
       // The head of chunk for the allocation is sizeof(size_t) behind the allocation.
       __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
     }
-    bytes_freed += AllocationSizeNonvirtual(ptr);
+    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
   }
 
   if (kRecentFreeCount > 0) {
@@ -228,10 +226,6 @@
   return dlmalloc_space->MoreCore(increment);
 }
 
-size_t DlMallocSpace::AllocationSize(mirror::Object* obj) {
-  return AllocationSizeNonvirtual(obj);
-}
-
 size_t DlMallocSpace::Trim() {
   MutexLock mu(Thread::Current(), lock_);
   // Trim to release memory at the end of the space.
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 4507c36..6ea10ad 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -17,7 +17,6 @@
 #ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
 #define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
 
-#include "gc/allocator/dlmalloc.h"
 #include "malloc_space.h"
 #include "space.h"
 
@@ -30,7 +29,8 @@
 
 namespace space {
 
-// An alloc space is a space where objects may be allocated and garbage collected.
+// An alloc space is a space where objects may be allocated and garbage collected. Not final as
+// it may be overridden by a ValgrindMallocSpace.
 class DlMallocSpace : public MallocSpace {
  public:
   // Create a DlMallocSpace from an existing mem_map.
@@ -45,21 +45,39 @@
   static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                size_t capacity, byte* requested_begin);
 
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
-                                          size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-  virtual size_t AllocationSize(mirror::Object* obj);
-  virtual size_t Free(Thread* self, mirror::Object* ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-
-  size_t AllocationSizeNonvirtual(mirror::Object* obj) {
-    void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
-    return mspace_usable_size(obj_ptr) + kChunkOverhead;
+  // Virtual to allow ValgrindMallocSpace to intercept.
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                          size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
+  // Virtual to allow ValgrindMallocSpace to intercept.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_) {
+    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
   }
+  // Virtual to allow ValgrindMallocSpace to intercept.
+  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+    return AllocationSizeNonvirtual(obj, usable_size);
+  }
+  // Virtual to allow ValgrindMallocSpace to intercept.
+  virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+      LOCKS_EXCLUDED(lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Virtual to allow ValgrindMallocSpace to intercept.
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+      LOCKS_EXCLUDED(lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // DlMallocSpaces don't have thread local state.
+  void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  }
+  void RevokeAllThreadLocalBuffers() OVERRIDE {
+  }
+
+  // Faster non-virtual allocation path.
+  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                  size_t* usable_size) LOCKS_EXCLUDED(lock_);
+
+  // Faster non-virtual allocation size path.
+  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);
 
 #ifndef NDEBUG
   // Override only in the debug build.
@@ -70,39 +88,37 @@
     return mspace_;
   }
 
-  size_t Trim();
+  size_t Trim() OVERRIDE;
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
   // in use, indicated by num_bytes equaling zero.
-  void Walk(WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_);
+  void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
 
   // Returns the number of bytes that the space has currently obtained from the system. This is
   // greater or equal to the amount of live data in the space.
-  size_t GetFootprint();
+  size_t GetFootprint() OVERRIDE;
 
   // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
-  size_t GetFootprintLimit();
+  size_t GetFootprintLimit() OVERRIDE;
 
   // Set the maximum number of bytes that the heap is allowed to obtain from the system via
   // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
-  void SetFootprintLimit(size_t limit);
+  void SetFootprintLimit(size_t limit) OVERRIDE;
 
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                               byte* begin, byte* end, byte* limit, size_t growth_limit);
 
-  uint64_t GetBytesAllocated();
-  uint64_t GetObjectsAllocated();
+  uint64_t GetBytesAllocated() OVERRIDE;
+  uint64_t GetObjectsAllocated() OVERRIDE;
 
-  // Returns the class of a recently freed object.
-  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
+  void Clear() OVERRIDE;
 
-  virtual void Clear();
-
-  virtual bool IsDlMallocSpace() const {
+  bool IsDlMallocSpace() const OVERRIDE {
     return true;
   }
-  virtual DlMallocSpace* AsDlMallocSpace() {
+
+  DlMallocSpace* AsDlMallocSpace() OVERRIDE {
     return this;
   }
 
@@ -111,10 +127,12 @@
                 byte* limit, size_t growth_limit);
 
  private:
-  mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated)
+  mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                           size_t* usable_size)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, bool /*low_memory_mode*/) {
+  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
+                        bool /*low_memory_mode*/) OVERRIDE {
     return CreateMspace(base, morecore_start, initial_size);
   }
   static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
@@ -122,11 +140,11 @@
   // The boundary tag overhead.
   static const size_t kChunkOverhead = kWordSize;
 
-  // Underlying malloc space
+  // Underlying malloc space.
   void* const mspace_;
 
-  // A mspace pointer used for allocation. Equals to what mspace_
-  // points to or nullptr after InvalidateAllocator() is called.
+  // An mspace pointer used for allocation. Equals mspace_ or nullptr after InvalidateAllocator()
+  // is called.
   void* mspace_for_alloc_;
 
   friend class collector::MarkSweep;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 12c5451..76c4d25 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -133,6 +133,11 @@
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
     CHECK(live_bitmap_->Test(obj));
     CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
+    if (kUseBrooksPointer) {
+      CHECK(obj->GetBrooksPointer() == obj)
+          << "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj)
+          << " brooks_ptr=" << reinterpret_cast<void*>(obj->GetBrooksPointer());
+    }
     current += RoundUp(obj->SizeOf(), kObjectAlignment);
   }
 }
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 987a655..1ca132e 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -57,7 +57,7 @@
 }
 
 mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
-                                           size_t* bytes_allocated) {
+                                           size_t* bytes_allocated, size_t* usable_size) {
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
                                          PROT_READ | PROT_WRITE, true, &error_msg);
@@ -72,6 +72,9 @@
   size_t allocation_size = mem_map->Size();
   DCHECK(bytes_allocated != NULL);
   *bytes_allocated = allocation_size;
+  if (usable_size != nullptr) {
+    *usable_size = allocation_size;
+  }
   num_bytes_allocated_ += allocation_size;
   total_bytes_allocated_ += allocation_size;
   ++num_objects_allocated_;
@@ -92,9 +95,9 @@
   return allocation_size;
 }
 
-size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj) {
+size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
   MutexLock mu(Thread::Current(), lock_);
-  MemMaps::iterator found = mem_maps_.find(obj);
+  auto found = mem_maps_.find(obj);
   CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
   return found->second->Size();
 }
@@ -112,7 +115,7 @@
 
 void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
-  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
+  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
     MemMap* mem_map = it->second;
     callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
     callback(NULL, NULL, 0, arg);
@@ -244,14 +247,19 @@
   return mem_map_->HasAddress(obj);
 }
 
-size_t FreeListSpace::AllocationSize(mirror::Object* obj) {
+size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
   AllocationHeader* header = GetAllocationHeader(obj);
   DCHECK(Contains(obj));
   DCHECK(!header->IsFree());
-  return header->AllocationSize();
+  size_t alloc_size = header->AllocationSize();
+  if (usable_size != nullptr) {
+    *usable_size = alloc_size - sizeof(AllocationHeader);
+  }
+  return alloc_size;
 }
 
-mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                     size_t* usable_size) {
   MutexLock mu(self, lock_);
   size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
   AllocationHeader temp;
@@ -280,13 +288,15 @@
       new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
       free_end_ -= allocation_size;
     } else {
-      return NULL;
+      return nullptr;
     }
   }
 
-  DCHECK(bytes_allocated != NULL);
+  DCHECK(bytes_allocated != nullptr);
   *bytes_allocated = allocation_size;
-
+  if (usable_size != nullptr) {
+    *usable_size = allocation_size - sizeof(AllocationHeader);
+  }
   // Need to do these inside of the lock.
   ++num_objects_allocated_;
   ++total_objects_allocated_;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 5274c8d..b1b0c3c 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -32,20 +32,20 @@
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
-  virtual SpaceType GetType() const {
+  SpaceType GetType() const OVERRIDE {
     return kSpaceTypeLargeObjectSpace;
   }
 
-  virtual void SwapBitmaps();
-  virtual void CopyLiveToMarked();
+  void SwapBitmaps();
+  void CopyLiveToMarked();
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
   virtual ~LargeObjectSpace() {}
 
-  uint64_t GetBytesAllocated() {
+  uint64_t GetBytesAllocated() OVERRIDE {
     return num_bytes_allocated_;
   }
 
-  uint64_t GetObjectsAllocated() {
+  uint64_t GetObjectsAllocated() OVERRIDE {
     return num_objects_allocated_;
   }
 
@@ -57,17 +57,23 @@
     return total_objects_allocated_;
   }
 
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
 
-  virtual bool IsAllocSpace() const {
+  // LargeObjectSpaces don't have thread local state.
+  void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  }
+  void RevokeAllThreadLocalBuffers() OVERRIDE {
+  }
+
+  bool IsAllocSpace() const OVERRIDE {
     return true;
   }
 
-  virtual AllocSpace* AsAllocSpace() {
+  AllocSpace* AsAllocSpace() OVERRIDE {
     return this;
   }
 
-  virtual void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
 
  protected:
   explicit LargeObjectSpace(const std::string& name);
@@ -85,17 +91,18 @@
 };
 
 // A discontinuous large object space implemented by individual mmap/munmap calls.
-class LargeObjectMapSpace : public LargeObjectSpace {
+class LargeObjectMapSpace FINAL : public LargeObjectSpace {
  public:
   // Creates a large object space. Allocations into the large object space use memory maps instead
   // of malloc.
   static LargeObjectMapSpace* Create(const std::string& name);
 
   // Return the storage space required by obj.
-  size_t AllocationSize(mirror::Object* obj);
-  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size);
   size_t Free(Thread* self, mirror::Object* ptr);
-  void Walk(DlMallocSpace::WalkCallback, void* arg) LOCKS_EXCLUDED(lock_);
+  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
@@ -113,16 +120,18 @@
 };
 
 // A continuous large object space with a free-list to handle holes.
-class FreeListSpace : public LargeObjectSpace {
+class FreeListSpace FINAL : public LargeObjectSpace {
  public:
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
 
-  size_t AllocationSize(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-  size_t Free(Thread* self, mirror::Object* obj);
-  bool Contains(const mirror::Object* obj) const;
-  void Walk(DlMallocSpace::WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_);
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE;
+  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
+  bool Contains(const mirror::Object* obj) const OVERRIDE;
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
 
   // Address at which the space begins.
   byte* Begin() const {
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index 845b9e3..8a6636d 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -45,9 +45,10 @@
       while (requests.size() < num_allocations) {
         size_t request_size = test_rand(&rand_seed) % max_allocation_size;
         size_t allocation_size = 0;
-        mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size);
+        mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size,
+                                         nullptr);
         ASSERT_TRUE(obj != nullptr);
-        ASSERT_EQ(allocation_size, los->AllocationSize(obj));
+        ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
         ASSERT_GE(allocation_size, request_size);
         // Fill in our magic value.
         byte magic = (request_size & 0xFF) | 1;
@@ -78,7 +79,7 @@
 
     size_t bytes_allocated = 0;
     // Checks that the coalescing works.
-    mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated);
+    mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr);
     EXPECT_TRUE(obj != nullptr);
     los->Free(Thread::Current(), obj);
 
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index f17bcd2..8e34fd0 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -52,13 +52,15 @@
     return kSpaceTypeMallocSpace;
   }
 
-  // Allocate num_bytes without allowing the underlying space to grow.
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
-                                          size_t* bytes_allocated) = 0;
   // Allocate num_bytes allowing the underlying space to grow.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
-  // Return the storage space required by obj.
-  virtual size_t AllocationSize(mirror::Object* obj) = 0;
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
+                                          size_t* bytes_allocated, size_t* usable_size) = 0;
+  // Allocate num_bytes without allowing the underlying space to grow.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                size_t* usable_size) = 0;
+  // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
+  // amount of the storage space that may be used by obj.
+  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
   virtual size_t Free(Thread* self, mirror::Object* ptr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
@@ -132,9 +134,8 @@
   static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
                               size_t* growth_limit, size_t* capacity, byte* requested_begin);
 
-  // When true the low memory mode argument specifies that the heap
-  // wishes the created allocator to be more aggressive in releasing
-  // unused pages.
+  // When true the low memory mode argument specifies that the heap wishes the created allocator to
+  // be more aggressive in releasing unused pages.
   virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                 bool low_memory_mode) = 0;
 
@@ -173,82 +174,6 @@
   DISALLOW_COPY_AND_ASSIGN(MallocSpace);
 };
 
-// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
-// after each allocation. 8 bytes provides long/double alignment.
-static constexpr size_t kValgrindRedZoneBytes = 8;
-
-// A specialization of DlMallocSpace/RosAllocSpace that provides information to valgrind wrt allocations.
-template <typename BaseMallocSpaceType, typename AllocatorType>
-class ValgrindMallocSpace : public BaseMallocSpaceType {
- public:
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
-    void* obj_with_rdz = BaseMallocSpaceType::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
-                                                              bytes_allocated);
-    if (obj_with_rdz == NULL) {
-      return NULL;
-    }
-    mirror::Object* result = reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
-    // Make redzones as no access.
-    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
-    return result;
-  }
-
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
-    void* obj_with_rdz = BaseMallocSpaceType::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
-                                                    bytes_allocated);
-    if (obj_with_rdz == NULL) {
-     return NULL;
-    }
-    mirror::Object* result = reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
-    // Make redzones as no access.
-    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
-    return result;
-  }
-
-  virtual size_t AllocationSize(mirror::Object* obj) {
-    size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes));
-    return result - 2 * kValgrindRedZoneBytes;
-  }
-
-  virtual size_t Free(Thread* self, mirror::Object* ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    void* obj_after_rdz = reinterpret_cast<void*>(ptr);
-    void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
-    // Make redzones undefined.
-    size_t allocation_size = BaseMallocSpaceType::AllocationSize(
-        reinterpret_cast<mirror::Object*>(obj_with_rdz));
-    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
-    size_t freed = BaseMallocSpaceType::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
-    return freed - 2 * kValgrindRedZoneBytes;
-  }
-
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    size_t freed = 0;
-    for (size_t i = 0; i < num_ptrs; i++) {
-      freed += Free(self, ptrs[i]);
-    }
-    return freed;
-  }
-
-  ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator, byte* begin,
-                      byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
-      BaseMallocSpaceType(name, mem_map, allocator, begin, end, limit, growth_limit) {
-    VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
-  }
-
-  virtual ~ValgrindMallocSpace() {
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
-};
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 5de4265..2627c85 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -25,20 +25,32 @@
 namespace gc {
 namespace space {
 
-inline mirror::Object* RosAllocSpace::AllocNonvirtual(Thread* self, size_t num_bytes,
-                                                      size_t* bytes_allocated) {
-  mirror::Object* obj;
-  obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
-  // RosAlloc zeroes memory internally.
-  return obj;
+inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
+  void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
+  // obj is a valid object. Use its class in the header to get the size.
+  // Don't use verification since the object may be dead if we are sweeping.
+  size_t size = obj->SizeOf<kVerifyNone>();
+  size_t size_by_size = rosalloc_->UsableSize(size);
+  if (kIsDebugBuild) {
+    size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr);
+    if (size_by_size != size_by_ptr) {
+      LOG(INFO) << "Found a bad sized obj of size " << size
+                << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
+                << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr;
+    }
+    DCHECK_EQ(size_by_size, size_by_ptr);
+  }
+  if (usable_size != nullptr) {
+    *usable_size = size_by_size;
+  }
+  return size_by_size;
 }
 
-inline mirror::Object* RosAllocSpace::AllocWithoutGrowthLocked(Thread* self, size_t num_bytes,
-                                                               size_t* bytes_allocated) {
+inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes,
+                                                  size_t* bytes_allocated, size_t* usable_size) {
   size_t rosalloc_size = 0;
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      rosalloc_for_alloc_->Alloc(self, num_bytes,
-                                 &rosalloc_size));
+      rosalloc_for_alloc_->Alloc(self, num_bytes, &rosalloc_size));
   if (LIKELY(result != NULL)) {
     if (kDebugSpaces) {
       CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
@@ -46,6 +58,10 @@
     }
     DCHECK(bytes_allocated != NULL);
     *bytes_allocated = rosalloc_size;
+    DCHECK_EQ(rosalloc_size, rosalloc_->UsableSize(result));
+    if (usable_size != nullptr) {
+      *usable_size = rosalloc_size;
+    }
   }
   return result;
 }
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index cc6c1d9..fe8421d 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -26,15 +26,15 @@
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-
-#include <valgrind.h>
-#include <memcheck/memcheck.h>
+#include "valgrind_malloc_space-inl.h"
 
 namespace art {
 namespace gc {
 namespace space {
 
-static const bool kPrefetchDuringRosAllocFreeList = true;
+static constexpr bool kPrefetchDuringRosAllocFreeList = true;
+
+template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
 RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
                              art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
@@ -45,9 +45,9 @@
 }
 
 RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                               size_t starting_size,
-                                               size_t initial_size, size_t growth_limit,
-                                               size_t capacity, bool low_memory_mode) {
+                                               size_t starting_size, size_t initial_size,
+                                               size_t growth_limit, size_t capacity,
+                                               bool low_memory_mode) {
   DCHECK(mem_map != nullptr);
   allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                  low_memory_mode);
@@ -63,19 +63,18 @@
   }
 
   // Everything is set so record in immutable structure and leave
-  RosAllocSpace* space;
   byte* begin = mem_map->Begin();
   if (RUNNING_ON_VALGRIND > 0) {
-    space = new ValgrindMallocSpace<RosAllocSpace, art::gc::allocator::RosAlloc*>(
+    return new ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>(
         name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size);
   } else {
-    space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
+    return new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
   }
-  return space;
 }
 
-RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                                     size_t capacity, byte* requested_begin, bool low_memory_mode) {
+RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
+                                     size_t growth_limit, size_t capacity, byte* requested_begin,
+                                     bool low_memory_mode) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     start_time = NanoTime();
@@ -129,11 +128,8 @@
   return rosalloc;
 }
 
-mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
-  return AllocNonvirtual(self, num_bytes, bytes_allocated);
-}
-
-mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
+                                               size_t* bytes_allocated, size_t* usable_size) {
   mirror::Object* result;
   {
     MutexLock mu(self, lock_);
@@ -141,7 +137,7 @@
     size_t max_allowed = Capacity();
     rosalloc_->SetFootprintLimit(max_allowed);
     // Try the allocation.
-    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
+    result = AllocCommon(self, num_bytes, bytes_allocated, usable_size);
     // Shrink back down as small as possible.
     size_t footprint = rosalloc_->Footprint();
     rosalloc_->SetFootprintLimit(footprint);
@@ -163,7 +159,7 @@
     CHECK(ptr != NULL);
     CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
   }
-  const size_t bytes_freed = AllocationSizeNonvirtual(ptr);
+  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
   if (kRecentFreeCount > 0) {
     MutexLock mu(self, lock_);
     RegisterRecentFree(ptr);
@@ -183,7 +179,7 @@
     if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
       __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
     }
-    bytes_freed += AllocationSizeNonvirtual(ptr);
+    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
   }
 
   if (kRecentFreeCount > 0) {
@@ -220,10 +216,6 @@
   return rosalloc_space->MoreCore(increment);
 }
 
-size_t RosAllocSpace::AllocationSize(mirror::Object* obj) {
-  return AllocationSizeNonvirtual(obj);
-}
-
 size_t RosAllocSpace::Trim() {
   {
     MutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 72e84f6..bd32196 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -30,7 +30,8 @@
 
 namespace space {
 
-// An alloc space is a space where objects may be allocated and garbage collected.
+// An alloc space implemented using a runs-of-slots memory allocator. Not final as may be
+// overridden by a ValgrindMallocSpace.
 class RosAllocSpace : public MallocSpace {
  public:
   // Create a RosAllocSpace with the requested sizes. The requested
@@ -44,53 +45,46 @@
                                          size_t growth_limit, size_t capacity,
                                          bool low_memory_mode);
 
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
-                                          size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-  virtual size_t AllocationSize(mirror::Object* obj);
-  virtual size_t Free(Thread* self, mirror::Object* ptr)
+  mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                  size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE {
+    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
+  }
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+    return AllocationSizeNonvirtual(obj, usable_size);
+  }
+  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-
-  size_t AllocationSizeNonvirtual(mirror::Object* obj)
-      NO_THREAD_SAFETY_ANALYSIS {
-    // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
-    void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
-    // obj is a valid object. Use its class in the header to get the size.
-    // Don't use verification since the object may be dead if we are sweeping.
-    size_t size = obj->SizeOf<kVerifyNone>();
-    size_t size_by_size = rosalloc_->UsableSize(size);
-    if (kIsDebugBuild) {
-      size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr);
-      if (size_by_size != size_by_ptr) {
-        LOG(INFO) << "Found a bad sized obj of size " << size
-                  << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
-                  << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr;
-      }
-      DCHECK_EQ(size_by_size, size_by_ptr);
-    }
-    return size_by_size;
+  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                  size_t* usable_size) {
+    // RosAlloc zeroes memory internally.
+    return AllocCommon(self, num_bytes, bytes_allocated, usable_size);
   }
 
-  art::gc::allocator::RosAlloc* GetRosAlloc() {
+  // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
+  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  allocator::RosAlloc* GetRosAlloc() const {
     return rosalloc_;
   }
 
-  size_t Trim();
-  void Walk(WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_);
-  size_t GetFootprint();
-  size_t GetFootprintLimit();
-  void SetFootprintLimit(size_t limit);
+  size_t Trim() OVERRIDE;
+  void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+  size_t GetFootprint() OVERRIDE;
+  size_t GetFootprintLimit() OVERRIDE;
+  void SetFootprintLimit(size_t limit) OVERRIDE;
 
-  virtual void Clear();
+  void Clear() OVERRIDE;
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                               byte* begin, byte* end, byte* limit, size_t growth_limit);
 
-  uint64_t GetBytesAllocated();
-  uint64_t GetObjectsAllocated();
+  uint64_t GetBytesAllocated() OVERRIDE;
+  uint64_t GetObjectsAllocated() OVERRIDE;
 
   void RevokeThreadLocalBuffers(Thread* thread);
   void RevokeAllThreadLocalBuffers();
@@ -98,10 +92,11 @@
   // Returns the class of a recently freed object.
   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
 
-  virtual bool IsRosAllocSpace() const {
+  bool IsRosAllocSpace() const OVERRIDE {
     return true;
   }
-  virtual RosAllocSpace* AsRosAllocSpace() {
+
+  RosAllocSpace* AsRosAllocSpace() OVERRIDE {
     return this;
   }
 
@@ -114,9 +109,11 @@
                 byte* begin, byte* end, byte* limit, size_t growth_limit);
 
  private:
-  mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+  mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                              size_t* usable_size);
 
-  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, bool low_memory_mode) {
+  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
+                        bool low_memory_mode) OVERRIDE {
     return CreateRosAlloc(base, morecore_start, initial_size, low_memory_mode);
   }
   static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
@@ -127,11 +124,11 @@
       LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
 
   // Underlying rosalloc.
-  art::gc::allocator::RosAlloc* const rosalloc_;
+  allocator::RosAlloc* const rosalloc_;
 
-  // A rosalloc pointer used for allocation. Equals to what rosalloc_
-  // points to or nullptr after InvalidateAllocator() is called.
-  art::gc::allocator::RosAlloc* rosalloc_for_alloc_;
+  // The rosalloc pointer used for allocation. Equal to rosalloc_ or nullptr after
+  // InvalidateAllocator() is called.
+  allocator::RosAlloc* rosalloc_for_alloc_;
 
   friend class collector::MarkSweep;
 
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 32a00bc..4af65a9 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -37,6 +37,36 @@
   return os;
 }
 
+DlMallocSpace* Space::AsDlMallocSpace() {
+  LOG(FATAL) << "Unreachable";
+  return nullptr;
+}
+
+RosAllocSpace* Space::AsRosAllocSpace() {
+  LOG(FATAL) << "Unreachable";
+  return nullptr;
+}
+
+ZygoteSpace* Space::AsZygoteSpace() {
+  LOG(FATAL) << "Unreachable";
+  return nullptr;
+}
+
+BumpPointerSpace* Space::AsBumpPointerSpace() {
+  LOG(FATAL) << "Unreachable";
+  return nullptr;
+}
+
+AllocSpace* Space::AsAllocSpace() {
+  LOG(FATAL) << "Unimplemented";
+  return nullptr;
+}
+
+ContinuousMemMapAllocSpace* Space::AsContinuousMemMapAllocSpace() {
+  LOG(FATAL) << "Unimplemented";
+  return nullptr;
+}
+
 DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
                                        GcRetentionPolicy gc_retention_policy) :
     Space(name, gc_retention_policy),
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 98e6f65..0f8f38a 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -115,35 +115,24 @@
   virtual bool IsDlMallocSpace() const {
     return false;
   }
-  virtual DlMallocSpace* AsDlMallocSpace() {
-    LOG(FATAL) << "Unreachable";
-    return nullptr;
-  }
+  virtual DlMallocSpace* AsDlMallocSpace();
+
   virtual bool IsRosAllocSpace() const {
     return false;
   }
-  virtual RosAllocSpace* AsRosAllocSpace() {
-    LOG(FATAL) << "Unreachable";
-    return nullptr;
-  }
+  virtual RosAllocSpace* AsRosAllocSpace();
 
-  // Is this the space allocated into by the Zygote and no-longer in use?
+  // Is this the space allocated into by the Zygote and no-longer in use for allocation?
   bool IsZygoteSpace() const {
     return GetType() == kSpaceTypeZygoteSpace;
   }
-  virtual ZygoteSpace* AsZygoteSpace() {
-    LOG(FATAL) << "Unreachable";
-    return nullptr;
-  }
+  virtual ZygoteSpace* AsZygoteSpace();
 
   // Is this space a bump pointer space?
   bool IsBumpPointerSpace() const {
     return GetType() == kSpaceTypeBumpPointerSpace;
   }
-  virtual BumpPointerSpace* AsBumpPointerSpace() {
-    LOG(FATAL) << "Unreachable";
-    return nullptr;
-  }
+  virtual BumpPointerSpace* AsBumpPointerSpace();
 
   // Does this space hold large objects and implement the large object space abstraction?
   bool IsLargeObjectSpace() const {
@@ -164,18 +153,12 @@
   virtual bool IsAllocSpace() const {
     return false;
   }
-  virtual AllocSpace* AsAllocSpace() {
-    LOG(FATAL) << "Unimplemented";
-    return nullptr;
-  }
+  virtual AllocSpace* AsAllocSpace();
 
   virtual bool IsContinuousMemMapAllocSpace() const {
     return false;
   }
-  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
-    LOG(FATAL) << "Unimplemented";
-    return nullptr;
-  }
+  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();
 
   virtual ~Space() {}
 
@@ -220,10 +203,11 @@
   // Allocate num_bytes without allowing growth. If the allocation
   // succeeds, the output parameter bytes_allocated will be set to the
   // actually allocated bytes which is >= num_bytes.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                size_t* usable_size) = 0;
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(mirror::Object* obj) = 0;
+  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
 
   // Returns how many bytes were freed.
   virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
@@ -231,15 +215,13 @@
   // Returns how many bytes were freed.
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
 
-  // Revoke any sort of thread-local buffers that are used to speed up
-  // allocations for the given thread, if the alloc space
-  // implementation uses any. No-op by default.
-  virtual void RevokeThreadLocalBuffers(Thread* /*thread*/) {}
+  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
+  // thread, if the alloc space implementation uses any.
+  virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;
 
-  // Revoke any sort of thread-local buffers that are used to speed up
-  // allocations for all the threads, if the alloc space
-  // implementation uses any. No-op by default.
-  virtual void RevokeAllThreadLocalBuffers() {}
+  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
+  // threads, if the alloc space implementation uses any.
+  virtual void RevokeAllThreadLocalBuffers() = 0;
 
  protected:
   AllocSpace() {}
@@ -393,17 +375,17 @@
 // Used by the heap compaction interface to enable copying from one type of alloc space to another.
 class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
  public:
-  virtual bool IsAllocSpace() const {
+  bool IsAllocSpace() const OVERRIDE {
     return true;
   }
-  virtual AllocSpace* AsAllocSpace() {
+  AllocSpace* AsAllocSpace() OVERRIDE {
     return this;
   }
 
-  virtual bool IsContinuousMemMapAllocSpace() const {
+  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
     return true;
   }
-  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
+  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
     return this;
   }
 
@@ -414,22 +396,19 @@
   // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
   void SwapBitmaps();
 
-  virtual void Clear() {
-    LOG(FATAL) << "Unimplemented";
-  }
+  // Free all memory associated with this space.
+  virtual void Clear() = 0;
 
-  virtual accounting::SpaceBitmap* GetLiveBitmap() const {
+  accounting::SpaceBitmap* GetLiveBitmap() const {
     return live_bitmap_.get();
   }
-  virtual accounting::SpaceBitmap* GetMarkBitmap() const {
+
+  accounting::SpaceBitmap* GetMarkBitmap() const {
     return mark_bitmap_.get();
   }
 
-  virtual void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
-  virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
-    LOG(FATAL) << "Unimplemented";
-    return nullptr;
-  }
+  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+  virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() = 0;
 
  protected:
   UniquePtr<accounting::SpaceBitmap> live_bitmap_;
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 093967e..cb036f8 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -19,19 +19,19 @@
 
 #include "zygote_space.h"
 
-#include "common_test.h"
+#include <stdint.h>
+
+#include "common_runtime_test.h"
 #include "globals.h"
 #include "UniquePtr.h"
 #include "mirror/array-inl.h"
 #include "mirror/object-inl.h"
 
-#include <stdint.h>
-
 namespace art {
 namespace gc {
 namespace space {
 
-class SpaceTest : public CommonTest {
+class SpaceTest : public CommonRuntimeTest {
  public:
   void AddSpace(ContinuousSpace* space) {
     // For RosAlloc, revoke the thread local runs before moving onto a
@@ -49,6 +49,9 @@
                                                                                       null_loader);
     EXPECT_TRUE(byte_array_class != nullptr);
     o->SetClass(byte_array_class);
+    if (kUseBrooksPointer) {
+      o->SetBrooksPointer(o.get());
+    }
     mirror::Array* arr = o->AsArray<kVerifyNone>();
     size_t header_size = SizeOfZeroLengthByteArray();
     int32_t length = size - header_size;
@@ -120,7 +123,7 @@
 // allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
 // the GC works with the ZygoteSpace.
 void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
-  size_t dummy = 0;
+  size_t dummy;
   MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
   ASSERT_TRUE(space != nullptr);
 
@@ -130,47 +133,60 @@
   ScopedObjectAccess soa(self);
 
   // Succeeds, fits without adjusting the footprint limit.
-  SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
+  size_t ptr1_bytes_allocated, ptr1_usable_size;
+  SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &ptr1_bytes_allocated,
+                                                  &ptr1_usable_size));
   EXPECT_TRUE(ptr1.get() != nullptr);
+  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
+  EXPECT_LE(1U * MB, ptr1_usable_size);
+  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
   InstallClass(ptr1, 1 * MB);
 
   // Fails, requires a higher footprint limit.
-  mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
+  mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
-  size_t ptr3_bytes_allocated;
-  SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
+  size_t ptr3_bytes_allocated, ptr3_usable_size;
+  SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated,
+                                                            &ptr3_usable_size));
   EXPECT_TRUE(ptr3.get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
+  EXPECT_LE(8U * MB, ptr3_usable_size);
+  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
   InstallClass(ptr3, 8 * MB);
 
   // Fails, requires a higher footprint limit.
-  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
+  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr4 == nullptr);
 
   // Also fails, requires a higher allowed footprint.
-  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
+  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr5 == nullptr);
 
   // Release some memory.
-  size_t free3 = space->AllocationSize(ptr3.get());
+  size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
   EXPECT_EQ(free3, ptr3_bytes_allocated);
   EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
-  SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
+  size_t ptr6_bytes_allocated, ptr6_usable_size;
+  SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &ptr6_bytes_allocated,
+                                                            &ptr6_usable_size));
   EXPECT_TRUE(ptr6.get() != nullptr);
+  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
+  EXPECT_LE(9U * MB, ptr6_usable_size);
+  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
   InstallClass(ptr6, 9 * MB);
 
   // Final clean up.
-  size_t free1 = space->AllocationSize(ptr1.get());
+  size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
   space->Free(self, ptr1.reset(nullptr));
   EXPECT_LE(1U * MB, free1);
 
   // Make sure that the zygote space isn't directly at the start of the space.
-  space->Alloc(self, 1U * MB, &dummy);
+  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);
 
   gc::Heap* heap = Runtime::Current()->GetHeap();
   space::Space* old_space = space;
@@ -186,22 +202,28 @@
   AddSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
-  ptr1.reset(space->Alloc(self, 1 * MB, &dummy));
+  ptr1.reset(space->Alloc(self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
   EXPECT_TRUE(ptr1.get() != nullptr);
+  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
+  EXPECT_LE(1U * MB, ptr1_usable_size);
+  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
   InstallClass(ptr1, 1 * MB);
 
   // Fails, requires a higher footprint limit.
-  ptr2 = space->Alloc(self, 8 * MB, &dummy);
+  ptr2 = space->Alloc(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
-  ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &dummy));
+  ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
   EXPECT_TRUE(ptr3.get() != nullptr);
+  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
+  EXPECT_LE(2U * MB, ptr3_usable_size);
+  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
   InstallClass(ptr3, 2 * MB);
   space->Free(self, ptr3.reset(nullptr));
 
   // Final clean up.
-  free1 = space->AllocationSize(ptr1.get());
+  free1 = space->AllocationSize(ptr1.get(), nullptr);
   space->Free(self, ptr1.reset(nullptr));
   EXPECT_LE(1U * MB, free1);
 }
@@ -217,42 +239,55 @@
   AddSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
-  SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
+  size_t ptr1_bytes_allocated, ptr1_usable_size;
+  SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &ptr1_bytes_allocated,
+                                                  &ptr1_usable_size));
   EXPECT_TRUE(ptr1.get() != nullptr);
+  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
+  EXPECT_LE(1U * MB, ptr1_usable_size);
+  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
   InstallClass(ptr1, 1 * MB);
 
   // Fails, requires a higher footprint limit.
-  mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
+  mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
-  size_t ptr3_bytes_allocated;
-  SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
+  size_t ptr3_bytes_allocated, ptr3_usable_size;
+  SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated,
+                                                            &ptr3_usable_size));
   EXPECT_TRUE(ptr3.get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
+  EXPECT_LE(8U * MB, ptr3_usable_size);
+  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
   InstallClass(ptr3, 8 * MB);
 
   // Fails, requires a higher footprint limit.
-  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
+  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr4 == nullptr);
 
   // Also fails, requires a higher allowed footprint.
-  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
+  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
   EXPECT_TRUE(ptr5 == nullptr);
 
   // Release some memory.
-  size_t free3 = space->AllocationSize(ptr3.get());
+  size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
   EXPECT_EQ(free3, ptr3_bytes_allocated);
   space->Free(self, ptr3.reset(nullptr));
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
-  SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
+  size_t ptr6_bytes_allocated, ptr6_usable_size;
+  SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &ptr6_bytes_allocated,
+                                                            &ptr6_usable_size));
   EXPECT_TRUE(ptr6.get() != nullptr);
+  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
+  EXPECT_LE(9U * MB, ptr6_usable_size);
+  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
   InstallClass(ptr6, 9 * MB);
 
   // Final clean up.
-  size_t free1 = space->AllocationSize(ptr1.get());
+  size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
   space->Free(self, ptr1.reset(nullptr));
   EXPECT_LE(1U * MB, free1);
 }
@@ -269,14 +304,17 @@
   // Succeeds, fits without adjusting the max allowed footprint.
   mirror::Object* lots_of_objects[1024];
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-    size_t allocation_size = 0;
+    size_t allocation_size, usable_size;
     size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
-    lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size);
+    lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size,
+                                      &usable_size);
     EXPECT_TRUE(lots_of_objects[i] != nullptr);
     SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
     InstallClass(obj, size_of_zero_length_byte_array);
     lots_of_objects[i] = obj.get();
-    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
+    size_t computed_usable_size;
+    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
+    EXPECT_EQ(usable_size, computed_usable_size);
   }
 
   // Release memory and check pointers are nullptr.
@@ -287,13 +325,15 @@
 
   // Succeeds, fits by adjusting the max allowed footprint.
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-    size_t allocation_size = 0;
-    lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
+    size_t allocation_size, usable_size;
+    lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size, &usable_size);
     EXPECT_TRUE(lots_of_objects[i] != nullptr);
     SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
     InstallClass(obj, 1024);
     lots_of_objects[i] = obj.get();
-    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
+    size_t computed_usable_size;
+    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
+    EXPECT_EQ(usable_size, computed_usable_size);
   }
 
   // Release memory and check pointers are nullptr
@@ -354,16 +394,16 @@
       SirtRef<mirror::Object> object(self, nullptr);
       size_t bytes_allocated = 0;
       if (round <= 1) {
-        object.reset(space->Alloc(self, alloc_size, &bytes_allocated));
+        object.reset(space->Alloc(self, alloc_size, &bytes_allocated, nullptr));
       } else {
-        object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated));
+        object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated, nullptr));
       }
       footprint = space->GetFootprint();
       EXPECT_GE(space->Size(), footprint);  // invariant
       if (object.get() != nullptr) {  // allocation succeeded
         InstallClass(object, alloc_size);
         lots_of_objects[i] = object.get();
-        size_t allocation_size = space->AllocationSize(object.get());
+        size_t allocation_size = space->AllocationSize(object.get(), nullptr);
         EXPECT_EQ(bytes_allocated, allocation_size);
         if (object_size > 0) {
           EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -418,7 +458,7 @@
       if (object == nullptr) {
         continue;
       }
-      size_t allocation_size = space->AllocationSize(object);
+      size_t allocation_size = space->AllocationSize(object, nullptr);
       if (object_size > 0) {
         EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
       } else {
@@ -447,9 +487,10 @@
   size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
   size_t bytes_allocated = 0;
   if (round <= 1) {
-    large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated));
+    large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated, nullptr));
   } else {
-    large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated));
+    large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated,
+                                              nullptr));
   }
   EXPECT_TRUE(large_object.get() != nullptr);
   InstallClass(large_object, three_quarters_space);
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
new file mode 100644
index 0000000..4b0c8e3
--- /dev/null
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+
+#include "valgrind_malloc_space.h"
+
+#include <memcheck/memcheck.h>
+
+namespace art {
+namespace gc {
+namespace space {
+
+// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
+// after each allocation. 8 bytes provides long/double alignment.
+static constexpr size_t kValgrindRedZoneBytes = 8;
+
+template <typename S, typename A>
+mirror::Object* ValgrindMallocSpace<S, A>::AllocWithGrowth(Thread* self, size_t num_bytes,
+                                                           size_t* bytes_allocated,
+                                                           size_t* usable_size) {
+  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
+                                          bytes_allocated, usable_size);
+  if (obj_with_rdz == nullptr) {
+    return nullptr;
+  }
+  if (usable_size != nullptr) {
+    *usable_size -= 2 * kValgrindRedZoneBytes;
+  }
+  mirror::Object* result = reinterpret_cast<mirror::Object*>(
+      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+  // Mark the redzones as no-access.
+  VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  return result;
+}
+
+template <typename S, typename A>
+mirror::Object* ValgrindMallocSpace<S, A>::Alloc(Thread* self, size_t num_bytes,
+                                                 size_t* bytes_allocated,
+                                                 size_t* usable_size) {
+  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes, bytes_allocated,
+                                usable_size);
+  if (obj_with_rdz == nullptr) {
+    return nullptr;
+  }
+  if (usable_size != nullptr) {
+    *usable_size -= 2 * kValgrindRedZoneBytes;
+  }
+  mirror::Object* result = reinterpret_cast<mirror::Object*>(
+      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+  // Mark the redzones as no-access.
+  VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  return result;
+}
+
+template <typename S, typename A>
+size_t ValgrindMallocSpace<S, A>::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+  size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
+      reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes), usable_size);
+  if (usable_size != nullptr) {
+    *usable_size -= 2 * kValgrindRedZoneBytes;
+  }
+  return result - 2 * kValgrindRedZoneBytes;
+}
+
+template <typename S, typename A>
+size_t ValgrindMallocSpace<S, A>::Free(Thread* self, mirror::Object* ptr) {
+  void* obj_after_rdz = reinterpret_cast<void*>(ptr);
+  void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+  // Make redzones undefined.
+  size_t allocation_size =
+      AllocationSize(reinterpret_cast<mirror::Object*>(obj_with_rdz), nullptr);
+  VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
+  size_t freed = S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+  return freed - 2 * kValgrindRedZoneBytes;
+}
+
+template <typename S, typename A>
+size_t ValgrindMallocSpace<S, A>::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+  size_t freed = 0;
+  for (size_t i = 0; i < num_ptrs; i++) {
+    freed += Free(self, ptrs[i]);
+  }
+  return freed;
+}
+
+template <typename S, typename A>
+ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map,
+                                               A allocator, byte* begin,
+                                               byte* end, byte* limit, size_t growth_limit,
+                                               size_t initial_size) :
+    S(name, mem_map, allocator, begin, end, limit, growth_limit) {
+  VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
new file mode 100644
index 0000000..8d00b30
--- /dev/null
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+
+#include "malloc_space.h"
+
+#include <valgrind.h>
+
+namespace art {
+namespace gc {
+namespace space {
+
+// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around
+// allocations.
+template <typename BaseMallocSpaceType, typename AllocatorType>
+class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
+ public:
+  mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                  size_t* usable_size) OVERRIDE;
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE;
+
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+
+  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
+                      byte* begin, byte* end, byte* limit, size_t growth_limit,
+                      size_t initial_size);
+  virtual ~ValgrindMallocSpace() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a303765..a60ab38 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -57,6 +57,10 @@
   return zygote_space;
 }
 
+void ZygoteSpace::Clear() {
+  LOG(FATAL) << "Unimplemented";
+}
+
 ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
     : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                  kGcRetentionPolicyFullCollect),
@@ -71,6 +75,27 @@
       << ",name=\"" << GetName() << "\"]";
 }
 
+mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                   size_t* usable_size) {
+  LOG(FATAL) << "Unimplemented";
+  return nullptr;
+}
+
+size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+  LOG(FATAL) << "Unimplemented";
+  return 0;
+}
+
+size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+  LOG(FATAL) << "Unimplemented";
+  return 0;
+}
+
+size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+  LOG(FATAL) << "Unimplemented";
+  return 0;
+}
+
 void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
   DCHECK(context->space->IsZygoteSpace());
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index e0035b3..8cd1a9f 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -30,7 +30,7 @@
 namespace space {
 
 // An zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace : public ContinuousMemMapAllocSpace {
+class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
  public:
   // Returns the remaining storage in the out_map field.
   static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
@@ -39,40 +39,40 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void Dump(std::ostream& os) const;
-  virtual SpaceType GetType() const {
+
+  SpaceType GetType() const OVERRIDE {
     return kSpaceTypeZygoteSpace;
   }
-  virtual ZygoteSpace* AsZygoteSpace() {
+
+  ZygoteSpace* AsZygoteSpace() OVERRIDE {
     return this;
   }
-  virtual mirror::Object* AllocWithGrowth(Thread* /*self*/, size_t /*num_bytes*/,
-                                          size_t* /*bytes_allocated*/) {
-    LOG(FATAL) << "Unimplemented";
-    return nullptr;
+
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE;
+
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+
+  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+
+  // ZygoteSpaces don't have thread local state.
+  void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
   }
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
-    LOG(FATAL) << "Unimplemented";
-    return nullptr;
+  void RevokeAllThreadLocalBuffers() OVERRIDE {
   }
-  virtual size_t AllocationSize(mirror::Object* obj) {
-    LOG(FATAL) << "Unimplemented";
-    return 0;
-  }
-  virtual size_t Free(Thread* self, mirror::Object* ptr) {
-    LOG(FATAL) << "Unimplemented";
-    return 0;
-  }
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
-    LOG(FATAL) << "Unimplemented";
-    return 0;
-  }
-  virtual uint64_t GetBytesAllocated() {
+
+  uint64_t GetBytesAllocated() {
     return Size();
   }
-  virtual uint64_t GetObjectsAllocated() {
+
+  uint64_t GetObjectsAllocated() {
     return objects_allocated_;
   }
 
+  void Clear();
+
  protected:
   virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
     return &SweepCallback;
diff --git a/runtime/globals.h b/runtime/globals.h
index 8c3ae56..83e3028 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -19,6 +19,7 @@
 
 #include <stddef.h>
 #include <stdint.h>
+#include "brooks_pointer.h"
 
 namespace art {
 
@@ -92,6 +93,12 @@
 // code, if possible.
 static constexpr bool kEmbedClassInCode = true;
 
+#ifdef USE_BROOKS_POINTER
+static constexpr bool kUseBrooksPointer = true;
+#else
+static constexpr bool kUseBrooksPointer = false;
+#endif
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 78e1992..9b42e59 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -14,15 +14,14 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
-
 #include "indirect_reference_table.h"
+
+#include "common_runtime_test.h"
 #include "mirror/object-inl.h"
 
 namespace art {
 
-class IndirectReferenceTableTest : public CommonTest {
-};
+class IndirectReferenceTableTest : public CommonRuntimeTest {};
 
 static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index c328245..8987127 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -16,13 +16,13 @@
 
 #include "intern_table.h"
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "mirror/object.h"
 #include "sirt_ref.h"
 
 namespace art {
 
-class InternTableTest : public CommonTest {};
+class InternTableTest : public CommonRuntimeTest {};
 
 TEST_F(InternTableTest, Intern) {
   ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index cb9e2e8..40d4ea3 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -372,22 +372,12 @@
   void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
   ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, last_shadow_frame, method, 0, memory));
   self->PushShadowFrame(shadow_frame);
-  self->EndAssertNoThreadSuspension(old_cause);
 
   size_t cur_reg = num_regs - num_ins;
   if (!method->IsStatic()) {
     CHECK(receiver != NULL);
     shadow_frame->SetVRegReference(cur_reg, receiver);
     ++cur_reg;
-  } else if (UNLIKELY(!method->GetDeclaringClass()->IsInitializing())) {
-    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-    SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
-    if (UNLIKELY(!class_linker->EnsureInitialized(sirt_c, true, true))) {
-      CHECK(self->IsExceptionPending());
-      self->PopShadowFrame();
-      return;
-    }
-    CHECK(sirt_c->IsInitializing());
   }
   const char* shorty = mh.GetShorty();
   for (size_t shorty_pos = 0, arg_pos = 0; cur_reg < num_regs; ++shorty_pos, ++arg_pos, cur_reg++) {
@@ -410,6 +400,17 @@
         break;
     }
   }
+  self->EndAssertNoThreadSuspension(old_cause);
+  // Do this after populating the shadow frame in case EnsureInitialized causes a GC.
+  if (method->IsStatic() && UNLIKELY(!method->GetDeclaringClass()->IsInitializing())) {
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
+    if (UNLIKELY(!class_linker->EnsureInitialized(sirt_c, true, true))) {
+      CHECK(self->IsExceptionPending());
+      self->PopShadowFrame();
+      return;
+    }
+  }
   if (LIKELY(!method->IsNative())) {
     JValue r = Execute(self, mh, code_item, *shadow_frame, JValue());
     if (result != NULL) {
@@ -418,6 +419,9 @@
   } else {
     // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
     // generated stub) except during testing and image writing.
+    // Update args to be the args in the shadow frame since the input ones could hold stale
+    // reference pointers due to moving GC.
+    args = shadow_frame->GetVRegArgs(method->IsStatic() ? 0 : 1);
     if (!Runtime::Current()->IsStarted()) {
       UnstartedRuntimeJni(self, method, receiver, args, result);
     } else {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 83a1fbc..f76d50c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -197,7 +197,8 @@
     }
     return false;
   }
-  Object* newArray = Array::Alloc<true>(self, arrayClass, length);
+  Object* newArray = Array::Alloc<true>(self, arrayClass, length, arrayClass->GetComponentSize(),
+                                        Runtime::Current()->GetHeap()->GetCurrentAllocator());
   if (UNLIKELY(newArray == NULL)) {
     DCHECK(self->IsExceptionPending());
     return false;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 76aa734..37fb2db 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -263,7 +263,7 @@
   // See if the override ClassLoader is set for gtests.
   class_loader = soa.Self()->GetClassLoaderOverride();
   if (class_loader != nullptr) {
-    // If so, CommonTest should have set UseCompileTimeClassPath.
+    // If so, CommonCompilerTest should have set UseCompileTimeClassPath.
     CHECK(Runtime::Current()->UseCompileTimeClassPath());
     return class_loader;
   }
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 63bc45c..28408d2 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -20,7 +20,7 @@
 #include <cfloat>
 #include <cmath>
 
-#include "common_test.h"
+#include "common_compiler_test.h"
 #include "invoke_arg_array_builder.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
@@ -31,10 +31,11 @@
 
 namespace art {
 
-class JniInternalTest : public CommonTest {
+// TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used.
+class JniInternalTest : public CommonCompilerTest {
  protected:
   virtual void SetUp() {
-    CommonTest::SetUp();
+    CommonCompilerTest::SetUp();
 
     vm_ = Runtime::Current()->GetJavaVM();
 
@@ -75,7 +76,7 @@
 
   virtual void TearDown() {
     CleanUpJniEnv();
-    CommonTest::TearDown();
+    CommonCompilerTest::TearDown();
   }
 
   jclass GetPrimitiveClass(char descriptor) {
@@ -2070,7 +2071,7 @@
 
   jint err = vm_->DetachCurrentThread();
   EXPECT_EQ(JNI_ERR, err);
-  vm_->AttachCurrentThread(&env_, NULL);  // need attached thread for CommonTest::TearDown
+  vm_->AttachCurrentThread(&env_, NULL);  // need attached thread for CommonRuntimeTest::TearDown
 }
 
 }  // namespace art
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 7a7d38d..0e80fe2 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -112,6 +112,88 @@
   return (x * 37) >> 8;
 }
 
+static inline uint8_t* EncodeUnsignedLeb128(uint8_t* dest, uint32_t value) {
+  uint8_t out = value & 0x7f;
+  value >>= 7;
+  while (value != 0) {
+    *dest++ = out | 0x80;
+    out = value & 0x7f;
+    value >>= 7;
+  }
+  *dest++ = out;
+  return dest;
+}
+
+static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) {
+  uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
+  uint8_t out = value & 0x7f;
+  while (extra_bits != 0u) {
+    *dest++ = out | 0x80;
+    value >>= 7;
+    out = value & 0x7f;
+    extra_bits >>= 7;
+  }
+  *dest++ = out;
+  return dest;
+}
+
+// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
+class Leb128EncodingVector {
+ public:
+  Leb128EncodingVector() {
+  }
+
+  void Reserve(uint32_t size) {
+    data_.reserve(size);
+  }
+
+  void PushBackUnsigned(uint32_t value) {
+    uint8_t out = value & 0x7f;
+    value >>= 7;
+    while (value != 0) {
+      data_.push_back(out | 0x80);
+      out = value & 0x7f;
+      value >>= 7;
+    }
+    data_.push_back(out);
+  }
+
+  template<typename It>
+  void InsertBackUnsigned(It cur, It end) {
+    for (; cur != end; ++cur) {
+      PushBackUnsigned(*cur);
+    }
+  }
+
+  void PushBackSigned(int32_t value) {
+    uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
+    uint8_t out = value & 0x7f;
+    while (extra_bits != 0u) {
+      data_.push_back(out | 0x80);
+      value >>= 7;
+      out = value & 0x7f;
+      extra_bits >>= 7;
+    }
+    data_.push_back(out);
+  }
+
+  template<typename It>
+  void InsertBackSigned(It cur, It end) {
+    for (; cur != end; ++cur) {
+      PushBackSigned(*cur);
+    }
+  }
+
+  const std::vector<uint8_t>& GetData() const {
+    return data_;
+  }
+
+ private:
+  std::vector<uint8_t> data_;
+
+  DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_LEB128_H_
diff --git a/compiler/leb128_encoder_test.cc b/runtime/leb128_test.cc
similarity index 98%
rename from compiler/leb128_encoder_test.cc
rename to runtime/leb128_test.cc
index 7af8518..d75d5c2 100644
--- a/compiler/leb128_encoder_test.cc
+++ b/runtime/leb128_test.cc
@@ -15,7 +15,6 @@
  */
 
 #include "leb128.h"
-#include "leb128_encoder.h"
 
 #include "gtest/gtest.h"
 #include "base/histogram-inl.h"
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 90aaccd..8158bc5 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -27,6 +27,10 @@
 namespace art {
 namespace mirror {
 
+static inline size_t HeaderSize(size_t component_size) {
+  return sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
+}
+
 template<VerifyObjectFlags kVerifyFlags>
 inline size_t Array::SizeOf() {
   // This is safe from overflow because the array was already allocated, so we know it's sane.
@@ -34,7 +38,7 @@
   // Don't need to check this since we already check this in GetClass.
   int32_t component_count =
       GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>();
-  size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
+  size_t header_size = HeaderSize(component_size);
   size_t data_size = component_count * component_size;
   return header_size + data_size;
 }
@@ -46,7 +50,7 @@
   DCHECK_GE(component_count, 0);
   DCHECK(array_class->IsArrayClass());
 
-  size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
+  size_t header_size = HeaderSize(component_size);
   size_t data_size = component_count * component_size;
   size_t size = header_size + data_size;
 
@@ -61,13 +65,16 @@
   return size;
 }
 
-// Used for setting the array length in the allocation code path to ensure it is guarded by a CAS.
+// Used for setting the array length in the allocation code path to ensure it is guarded by a
+// StoreStore fence.
 class SetLengthVisitor {
  public:
   explicit SetLengthVisitor(int32_t length) : length_(length) {
   }
 
-  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void operator()(Object* obj, size_t usable_size) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    UNUSED(usable_size);
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
     Array* array = down_cast<Array*>(obj);
     // DCHECK(array->IsArrayInstance());
@@ -76,41 +83,72 @@
 
  private:
   const int32_t length_;
+
+  DISALLOW_COPY_AND_ASSIGN(SetLengthVisitor);
+};
+
+// Similar to SetLengthVisitor, used for setting the array length to fill the usable size of an
+// array.
+class SetLengthToUsableSizeVisitor {
+ public:
+  SetLengthToUsableSizeVisitor(int32_t min_length, size_t header_size, size_t component_size) :
+      minimum_length_(min_length), header_size_(header_size), component_size_(component_size) {
+  }
+
+  void operator()(Object* obj, size_t usable_size) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
+    Array* array = down_cast<Array*>(obj);
+    // DCHECK(array->IsArrayInstance());
+    int32_t length = (usable_size - header_size_) / component_size_;
+    DCHECK_GE(length, minimum_length_);
+    byte* old_end = reinterpret_cast<byte*>(array->GetRawData(component_size_, minimum_length_));
+    byte* new_end = reinterpret_cast<byte*>(array->GetRawData(component_size_, length));
+    // Ensure space beyond original allocation is zeroed.
+    memset(old_end, 0, new_end - old_end);
+    array->SetLength(length);
+  }
+
+ private:
+  const int32_t minimum_length_;
+  const size_t header_size_;
+  const size_t component_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor);
 };
 
 template <bool kIsInstrumented>
 inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
-                           size_t component_size, gc::AllocatorType allocator_type) {
+                           size_t component_size, gc::AllocatorType allocator_type,
+                           bool fill_usable) {
+  DCHECK(allocator_type != gc::kAllocatorTypeLOS);
   size_t size = ComputeArraySize(self, array_class, component_count, component_size);
   if (UNLIKELY(size == 0)) {
     return nullptr;
   }
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  SetLengthVisitor visitor(component_count);
-  DCHECK(allocator_type != gc::kAllocatorTypeLOS);
-  return down_cast<Array*>(
-      heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
-                                                            allocator_type, visitor));
-}
-
-template <bool kIsInstrumented>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
-                           gc::AllocatorType allocator_type) {
-  DCHECK(array_class->IsArrayClass());
-  return Alloc<kIsInstrumented>(self, array_class, component_count, array_class->GetComponentSize(),
-                                allocator_type);
-}
-template <bool kIsInstrumented>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
-  return Alloc<kIsInstrumented>(self, array_class, component_count,
-               Runtime::Current()->GetHeap()->GetCurrentAllocator());
-}
-
-template <bool kIsInstrumented>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
-                           size_t component_size) {
-  return Alloc<kIsInstrumented>(self, array_class, component_count, component_size,
-               Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  Array* result;
+  if (!fill_usable) {
+    SetLengthVisitor visitor(component_count);
+    result = down_cast<Array*>(
+        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
+                                                              allocator_type, visitor));
+  } else {
+    SetLengthToUsableSizeVisitor visitor(component_count, HeaderSize(component_size),
+                                         component_size);
+    result = down_cast<Array*>(
+        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
+                                                              allocator_type, visitor));
+  }
+  if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
+    CHECK_EQ(array_class->GetComponentSize(), component_size);
+    if (!fill_usable) {
+      CHECK_EQ(result->SizeOf(), size);
+    } else {
+      CHECK_GE(result->SizeOf(), size);
+    }
+  }
+  return result;
 }
 
 template<class T>
@@ -133,9 +171,17 @@
   }
 }
 
+template<typename T>
+inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
+  DCHECK(array_class_ != NULL);
+  Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
+                                        Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  return down_cast<PrimitiveArray<T>*>(raw_array);
+}
+
 template<class T>
-void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
-                                int32_t count) {
+inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+                                       int32_t count) {
   if (UNLIKELY(count == 0)) {
     return;
   }
@@ -192,8 +238,8 @@
 
 
 template<class T>
-void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
-                               int32_t count) {
+inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+                                      int32_t count) {
   if (UNLIKELY(count == 0)) {
     return;
   }
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 715f072..139e2d0 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -46,7 +46,9 @@
                                         const SirtRef<mirror::IntArray>& dimensions)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
-  SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class.get(), array_length));
+  SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class.get(), array_length,
+                                                    array_class->GetComponentSize(),
+                                                    Runtime::Current()->GetHeap()->GetCurrentAllocator()));
   if (UNLIKELY(new_array.get() == nullptr)) {
     CHECK(self->IsExceptionPending());
     return nullptr;
@@ -117,13 +119,6 @@
   art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
 }
 
-template<typename T>
-PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
-  DCHECK(array_class_ != NULL);
-  Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T));
-  return down_cast<PrimitiveArray<T>*>(raw_array);
-}
-
 template <typename T> Class* PrimitiveArray<T>::array_class_ = NULL;
 
 // Explicitly instantiate all the primitive array types.
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index c4f9a75..772d303 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -28,25 +28,13 @@
 
 class MANAGED Array : public Object {
  public:
-  // A convenience for code that doesn't know the component size, and doesn't want to have to work
-  // it out itself.
+  // Allocates an array with the given properties; if fill_usable is true, the array will be of
+  // at least component_count size. However, if there's usable space at the end of the allocation,
+  // the array will fill it.
   template <bool kIsInstrumented>
   static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
-                      gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  template <bool kIsInstrumented>
-  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
-                      size_t component_size, gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  template <bool kIsInstrumented>
-  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  template <bool kIsInstrumented>
-  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
-                      size_t component_size)
+                      size_t component_size, gc::AllocatorType allocator_type,
+                      bool fill_usable = false)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Array* CreateMultiArray(Thread* self, const SirtRef<Class>& element_class,
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 6bed224..fef1f9b 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -14,20 +14,21 @@
  * limitations under the License.
  */
 
-#include "class_linker.h"
-#include "common_test.h"
 #include "dex_cache.h"
+
+#include <stdio.h>
+
+#include "class_linker.h"
+#include "common_runtime_test.h"
 #include "gc/heap.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
 #include "sirt_ref.h"
 
-#include <stdio.h>
-
 namespace art {
 namespace mirror {
 
-class DexCacheTest : public CommonTest {};
+class DexCacheTest : public CommonRuntimeTest {};
 
 TEST_F(DexCacheTest, Open) {
   ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index df8104d..478cc36 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -92,6 +92,38 @@
   Monitor::Wait(self, this, ms, ns, true, kTimedWaiting);
 }
 
+inline Object* Object::GetBrooksPointer() {
+#ifdef USE_BROOKS_POINTER
+  DCHECK(kUseBrooksPointer);
+  return GetFieldObject<Object, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Object, x_brooks_ptr_), false);
+#else
+  LOG(FATAL) << "Unreachable";
+  return nullptr;
+#endif
+}
+
+inline void Object::SetBrooksPointer(Object* brooks_pointer) {
+#ifdef USE_BROOKS_POINTER
+  DCHECK(kUseBrooksPointer);
+  // We don't mark the card since this occurs as part of object allocation. Not all objects have
+  // backing cards, such as large objects.
+  SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
+      OFFSET_OF_OBJECT_MEMBER(Object, x_brooks_ptr_), brooks_pointer, false);
+#else
+  LOG(FATAL) << "Unreachable";
+#endif
+}
+
+inline void Object::AssertSelfBrooksPointer() const {
+#ifdef USE_BROOKS_POINTER
+  DCHECK(kUseBrooksPointer);
+  Object* obj = const_cast<Object*>(this);
+  DCHECK_EQ(obj, obj->GetBrooksPointer());
+#else
+  LOG(FATAL) << "Unreachable";
+#endif
+}
+
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::VerifierInstanceOf(Class* klass) {
   DCHECK(klass != NULL);
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 7487dd2..ded4e0a 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -76,6 +76,10 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  Object* GetBrooksPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetBrooksPointer(Object* brooks_pointer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertSelfBrooksPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
   // invoke-interface to detect incompatible interface types.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -260,6 +264,14 @@
   // Monitor and hash code information.
   uint32_t monitor_;
 
+#ifdef USE_BROOKS_POINTER
+  // Note: names use an 'x' prefix and x_brooks_ptr_ is of type int
+  // instead of Object to go with the alphabetical/by-type field order
+  // on the Java side.
+  uint32_t x_brooks_ptr_;  // For the Brooks pointer.
+  uint32_t x_padding_;     // For 8-byte alignment. TODO: get rid of this.
+#endif
+
   friend class art::ImageWriter;
   friend class art::Monitor;
   friend struct art::ObjectOffsets;  // for verifying offset information
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 34fb15e..7d8da14 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -25,7 +25,7 @@
 #include "class-inl.h"
 #include "class_linker.h"
 #include "class_linker-inl.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "gc/accounting/card_table-inl.h"
@@ -40,7 +40,7 @@
 namespace art {
 namespace mirror {
 
-class ObjectTest : public CommonTest {
+class ObjectTest : public CommonRuntimeTest {
  protected:
   void AssertString(int32_t expected_utf16_length,
                     const char* utf8_in,
@@ -148,16 +148,52 @@
 TEST_F(ObjectTest, AllocArray) {
   ScopedObjectAccess soa(Thread::Current());
   Class* c = class_linker_->FindSystemClass(soa.Self(), "[I");
-  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1));
-  ASSERT_TRUE(c == a->GetClass());
+  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+                                                  Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_EQ(1, a->GetLength());
 
   c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
-  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
-  ASSERT_TRUE(c == a->GetClass());
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+                             Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_EQ(1, a->GetLength());
 
   c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
-  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
-  ASSERT_TRUE(c == a->GetClass());
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+                             Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_EQ(1, a->GetLength());
+}
+
+TEST_F(ObjectTest, AllocArray_FillUsable) {
+  ScopedObjectAccess soa(Thread::Current());
+  Class* c = class_linker_->FindSystemClass(soa.Self(), "[B");
+  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+                                                  Runtime::Current()->GetHeap()->GetCurrentAllocator(),
+                                                  true));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_LE(1, a->GetLength());
+
+  c = class_linker_->FindSystemClass(soa.Self(), "[I");
+  a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+                             Runtime::Current()->GetHeap()->GetCurrentAllocator(),
+                             true));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_LE(2, a->GetLength());
+
+  c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
+  a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+                             Runtime::Current()->GetHeap()->GetCurrentAllocator(),
+                             true));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_LE(2, a->GetLength());
+
+  c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
+  a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+                             Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_LE(2, a->GetLength());
 }
 
 template<typename ArrayT>
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 5779442..3c703ba 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -72,7 +72,7 @@
   }
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator();
   mirror::Array* result = mirror::Array::Alloc<true>(soa.Self(), array_class, length,
-                                                     allocator);
+                                                     array_class->GetComponentSize(), allocator);
   return soa.AddLocalReference<jobject>(result);
 }
 
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index fc30aa6..a991818 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -50,14 +50,17 @@
     ThrowNegativeArraySizeException(length);
     return NULL;
   }
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
   mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), element_class);
   if (UNLIKELY(array_class == NULL)) {
     CHECK(soa.Self()->IsExceptionPending());
     return NULL;
   }
-  DCHECK(array_class->IsArrayClass());
-  mirror::Array* new_array = mirror::Array::Alloc<true>(soa.Self(), array_class, length);
+  DCHECK(array_class->IsObjectArrayClass());
+  mirror::Array* new_array = mirror::Array::Alloc<true>(soa.Self(), array_class, length,
+                                                        sizeof(mirror::HeapReference<mirror::Object>),
+                                                        runtime->GetHeap()->GetCurrentAllocator());
   return soa.AddLocalReference<jobject>(new_array);
 }
 
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index 16fbd94..3229039 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -14,14 +14,13 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
-
 #include "reference_table.h"
 
+#include "common_runtime_test.h"
+
 namespace art {
 
-class ReferenceTableTest : public CommonTest {
-};
+class ReferenceTableTest : public CommonRuntimeTest {};
 
 TEST_F(ReferenceTableTest, Basics) {
   ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/runtime_test.cc b/runtime/runtime_test.cc
index d53b4a6..5b881e5 100644
--- a/runtime/runtime_test.cc
+++ b/runtime/runtime_test.cc
@@ -17,11 +17,11 @@
 #include "runtime.h"
 
 #include "UniquePtr.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class RuntimeTest : public CommonTest {};
+class RuntimeTest : public CommonRuntimeTest {};
 
 TEST_F(RuntimeTest, ParsedOptions) {
   void* test_vfprintf = reinterpret_cast<void*>(0xa);
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index c6f0e92..c1a1ad7 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -14,12 +14,12 @@
  * limitations under the License.
  */
 
+#include "thread_pool.h"
 
 #include <string>
 
 #include "atomic.h"
-#include "common_test.h"
-#include "thread_pool.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
@@ -49,7 +49,7 @@
   const bool verbose_;
 };
 
-class ThreadPoolTest : public CommonTest {
+class ThreadPoolTest : public CommonRuntimeTest {
  public:
   static int32_t num_threads;
 };
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 9dc7b44..7242b81 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
+#include "transaction.h"
+
+#include "common_runtime_test.h"
 #include "invoke_arg_array_builder.h"
 #include "mirror/array-inl.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
-#include "transaction.h"
 
 namespace art {
 
-class TransactionTest : public CommonTest {
-};
+class TransactionTest : public CommonRuntimeTest {};
 
 TEST_F(TransactionTest, Object_class) {
   ScopedObjectAccess soa(Thread::Current());
@@ -86,7 +86,10 @@
 
   // Allocate an array during transaction.
   SirtRef<mirror::Array> sirt_obj(soa.Self(),
-                                  mirror::Array::Alloc<false>(soa.Self(), sirt_klass.get(), kArraySize));
+                                  mirror::Array::Alloc<false>(soa.Self(), sirt_klass.get(),
+                                                              kArraySize,
+                                                              sirt_klass->GetComponentSize(),
+                                                              Runtime::Current()->GetHeap()->GetCurrentAllocator()));
   ASSERT_TRUE(sirt_obj.get() != nullptr);
   ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
   Runtime::Current()->ExitTransactionMode();
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 68d8417..df1ab94 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1232,7 +1232,7 @@
 
     execv(program, &args[0]);
 
-    *error_msg = StringPrintf("Failed to execv(%s): %s", command_line.c_str(), strerror(errno));
+    PLOG(FATAL) << "Failed to execv(" << command_line << ")";
     return false;
   } else {
     if (pid == -1) {
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 0d237e2..d804f6a 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -14,7 +14,9 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
+#include "utils.h"
+
+#include "common_runtime_test.h"
 #include "mirror/array.h"
 #include "mirror/array-inl.h"
 #include "mirror/object-inl.h"
@@ -22,15 +24,13 @@
 #include "mirror/string.h"
 #include "scoped_thread_state_change.h"
 #include "sirt_ref.h"
-#include "utils.h"
 
 namespace art {
 
 std::string PrettyArguments(const char* signature);
 std::string PrettyReturnType(const char* signature);
 
-class UtilsTest : public CommonTest {
-};
+class UtilsTest : public CommonRuntimeTest {};
 
 TEST_F(UtilsTest, PrettyDescriptor_ArrayReferences) {
   EXPECT_EQ("java.lang.Class[]", PrettyDescriptor("[Ljava/lang/Class;"));
@@ -362,9 +362,7 @@
   EXPECT_EQ(0U, error_msg.size()) << error_msg;
 }
 
-// TODO: Disabled due to hang tearing down CommonTest.
-// Renable after splitting into RuntimeTest and CompilerTest.
-TEST_F(UtilsTest, DISABLED_ExecError) {
+TEST_F(UtilsTest, ExecError) {
   std::vector<std::string> command;
   command.push_back("bogus");
   std::string error_msg;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index ffa2455..9dca7f5 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -14,18 +14,19 @@
  * limitations under the License.
  */
 
+#include "method_verifier.h"
+
 #include <stdio.h>
 
 #include "UniquePtr.h"
 #include "class_linker.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
-#include "method_verifier.h"
 
 namespace art {
 namespace verifier {
 
-class MethodVerifierTest : public CommonTest {
+class MethodVerifierTest : public CommonRuntimeTest {
  protected:
   void VerifyClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index dc320be..1a64c00 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -15,16 +15,17 @@
  */
 
 #include "reg_type.h"
-#include "reg_type_cache-inl.h"
+
+#include <set>
 
 #include "base/casts.h"
-#include "common_test.h"
-#include <set>
+#include "common_runtime_test.h"
+#include "reg_type_cache-inl.h"
 
 namespace art {
 namespace verifier {
 
-class RegTypeTest : public CommonTest {};
+class RegTypeTest : public CommonRuntimeTest {};
 
 TEST_F(RegTypeTest, ConstLoHi) {
   // Tests creating primitive types types.
@@ -335,7 +336,7 @@
 }
 
 
-class RegTypeReferenceTest : public CommonTest {};
+class RegTypeReferenceTest : public CommonRuntimeTest {};
 
 TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
   // Tests matching precisions. A reference type that was created precise doesn't
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 16394b0..0bf6767 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -22,12 +22,12 @@
 #include <zlib.h>
 
 #include "UniquePtr.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "os.h"
 
 namespace art {
 
-class ZipArchiveTest : public CommonTest {};
+class ZipArchiveTest : public CommonRuntimeTest {};
 
 TEST_F(ZipArchiveTest, FindAndExtract) {
   std::string error_msg;
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index 87e4baf..0239a3c 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -36,6 +36,10 @@
         Assert.assertTrue(test1.compareTo(test2) > 0);
         Assert.assertTrue(test2.compareTo(test1) < 0);
 
+        Assert.assertEquals("".compareTo(""), 0);
+        Assert.assertTrue(test.compareTo("") > 0);
+        Assert.assertTrue("".compareTo(test) < 0);
+
         /* compare string with a nonzero offset, in left/right side */
         Assert.assertEquals(test.compareTo(sub), 0);
         Assert.assertEquals(sub.compareTo(test), 0);