Merge "Add ClassAccessor"
diff --git a/Android.mk b/Android.mk
index d6472be..1c94629 100644
--- a/Android.mk
+++ b/Android.mk
@@ -245,19 +245,6 @@
 test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist)
 	ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests
 
-# Valgrind.
-.PHONY: valgrind-test-art-host
-valgrind-test-art-host: valgrind-test-art-host-gtest
-	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-host32
-valgrind-test-art-host32: valgrind-test-art-host-gtest32
-	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-host64
-valgrind-test-art-host64: valgrind-test-art-host-gtest64
-	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
 ########################################################################
 # target test rules
 
@@ -332,19 +319,6 @@
 	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
 endif
 
-# Valgrind.
-.PHONY: valgrind-test-art-target
-valgrind-test-art-target: valgrind-test-art-target-gtest
-	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-target32
-valgrind-test-art-target32: valgrind-test-art-target-gtest32
-	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-target64
-valgrind-test-art-target64: valgrind-test-art-target-gtest64
-	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
 
 #######################
 # Fake packages for ART
@@ -487,6 +461,7 @@
 	sed -i '/libartd.so/d' $(TARGET_OUT)/etc/public.libraries.txt
 	sed -i '/libdexfiled.so/d' $(TARGET_OUT)/etc/public.libraries.txt
 	sed -i '/libprofiled.so/d' $(TARGET_OUT)/etc/public.libraries.txt
+	sed -i '/libartbased.so/d' $(TARGET_OUT)/etc/public.libraries.txt
 
 ########################################################################
 # Phony target for building what go/lem requires on host.
diff --git a/build/Android.bp b/build/Android.bp
index 2a5598f..3a1d583 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -127,8 +127,6 @@
     },
 
     include_dirs: [
-        "external/valgrind/include",
-        "external/valgrind",
         "external/vixl/src",
     ],
 
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b481352..c3f81a6 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -408,15 +408,9 @@
 ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
 ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
 ART_TEST_HOST_GTEST_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST_RULES :=
 ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST_RULES :=
 ART_TEST_HOST_GTEST_DEPENDENCIES :=
 
 ART_GTEST_TARGET_ANDROID_ROOT := '/system'
@@ -424,40 +418,6 @@
   ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
 endif
 
-ART_VALGRIND_TARGET_DEPENDENCIES :=
-
-# Has to match list in external/valgrind/Android.build_one.mk
-ART_VALGRIND_SUPPORTED_ARCH := arm arm64 x86_64
-
-# Valgrind is not supported for x86
-ifneq (,$(filter $(ART_VALGRIND_SUPPORTED_ARCH),$(TARGET_ARCH)))
-art_vg_arch := $(if $(filter x86_64,$(TARGET_ARCH)),amd64,$(TARGET_ARCH))
-ART_VALGRIND_TARGET_DEPENDENCIES += \
-  $(TARGET_OUT_EXECUTABLES)/valgrind \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/memcheck-$(art_vg_arch)-linux \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_core-$(art_vg_arch)-linux.so \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_memcheck-$(art_vg_arch)-linux.so \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/default.supp
-art_vg_arch :=
-endif
-
-ifdef TARGET_2ND_ARCH
-ifneq (,$(filter $(ART_VALGRIND_SUPPORTED_ARCH),$(TARGET_2ND_ARCH)))
-ART_VALGRIND_TARGET_DEPENDENCIES += \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/memcheck-$(TARGET_2ND_ARCH)-linux \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_core-$(TARGET_2ND_ARCH)-linux.so \
-  $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_memcheck-$(TARGET_2ND_ARCH)-linux.so
-endif
-endif
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := valgrind-target-suppressions.txt
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_TAGS := optional
-LOCAL_SRC_FILES := test/valgrind-target-suppressions.txt
-LOCAL_MODULE_PATH := $(ART_TARGET_TEST_OUT)
-include $(BUILD_PREBUILT)
-
 # Define a make rule for a target device gtest.
 # $(1): gtest name - the name of the test we're building such as leb128_test.
 # $(2): path relative to $OUT to the test binary
@@ -477,10 +437,9 @@
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
     $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
-    $$(ART_TARGET_TEST_OUT)/valgrind-target-suppressions.txt
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
 
-$$(gtest_rule) valgrind-$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
+$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
 
 ifeq ($(ART_TEST_CHROOT),)
 # Non-chroot configuration.
@@ -515,37 +474,7 @@
   ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule)
   ART_TEST_TARGET_GTEST_$(1)_RULES += $$(gtest_rule)
 
-# File witnessing the success of the Valgrind gtest, the presence of which means the gtest's
-# success.
-valgrind_gtest_witness := \
-  $(maybe_art_test_chroot)$(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/valgrind-$$(gtest_rule)-$$$$PPID
-
-valgrind-$$(gtest_rule): VALGRIND_GTEST_WITNESS := $$(valgrind_gtest_witness)
-
-.PHONY: valgrind-$$(gtest_rule)
-valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-sync
-	$(hide) adb shell touch $$(VALGRIND_GTEST_WITNESS)
-	$(hide) adb shell rm $$(VALGRIND_GTEST_WITNESS)
-	$(hide) adb shell chmod 755 $(maybe_art_test_chroot)$$(PRIVATE_TARGET_EXE)
-	$(hide) $$(call ART_TEST_SKIP,$$@) && \
-	  (adb shell "$(maybe_chroot_command) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
-	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
-	       $(ART_GTEST_TARGET_ANDROID_ROOT)/bin/valgrind \
-	       --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \
-	       --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \
-	       --num-callers=50 --show-mismatched-frees=no $$(PRIVATE_TARGET_EXE) \
-	     && touch $$(VALGRIND_GTEST_WITNESS)" \
-	   && (adb pull $$(VALGRIND_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
-	   || $$(call ART_TEST_FAILED,$$@))
-	$(hide) rm -f /tmp/$$@-$$$$PPID
-
-  ART_TEST_TARGET_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += \
-    valgrind-$$(gtest_rule)
-  ART_TEST_TARGET_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule)
-  ART_TEST_TARGET_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule)
-
   # Clear locally defined variables.
-  valgrind_gtest_witness :=
   gtest_witness :=
   maybe_chroot_command :=
   maybe_art_test_chroot :=
@@ -554,16 +483,6 @@
   gtest_rule :=
 endef  # define-art-gtest-rule-target
 
-ART_VALGRIND_DEPENDENCIES := \
-  $(HOST_OUT_EXECUTABLES)/valgrind \
-  $(HOST_OUT)/lib64/valgrind/memcheck-amd64-linux \
-  $(HOST_OUT)/lib64/valgrind/memcheck-x86-linux \
-  $(HOST_OUT)/lib64/valgrind/default.supp \
-  $(HOST_OUT)/lib64/valgrind/vgpreload_core-amd64-linux.so \
-  $(HOST_OUT)/lib64/valgrind/vgpreload_core-x86-linux.so \
-  $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-amd64-linux.so \
-  $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-x86-linux.so
-
 # Define make rules for a host gtest.
 # $(1): gtest name - the name of the test we're building such as leb128_test.
 # $(2): path relative to $OUT to the test binary
@@ -615,19 +534,6 @@
   ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule)
 
 
-.PHONY: valgrind-$$(gtest_rule)
-valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIES)
-	$(hide) $$(call ART_TEST_SKIP,$$@) && \
-	  VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \
-	  $(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \
-	    --suppressions=art/test/valgrind-suppressions.txt --num-callers=50 \
-	    $$< && \
-	    $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
-
-  ART_TEST_HOST_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule)
-  ART_TEST_HOST_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule)
-  ART_TEST_HOST_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule)
-
   # Clear locally defined variables.
   gtest_deps :=
   gtest_exe :=
@@ -660,7 +566,6 @@
 
   ifndef ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES
     ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
-    ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
   endif
   $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),$$(art_gtest_filename),$(2),$$($(2)library_path)))
 
@@ -680,7 +585,6 @@
   art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename)))
   ifndef ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES
     ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES :=
-    ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
   endif
   $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),$$(art_gtest_filename),$(2)))
 
@@ -699,13 +603,8 @@
 test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES)
 	$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
 
-.PHONY: valgrind-test-art-target-gtest-$$(art_gtest_name)
-valgrind-test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES)
-	$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
   # Clear now unused variables.
   ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
-  ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
   art_gtest_name :=
 endef  # define-art-gtest-target-both
 
@@ -718,13 +617,8 @@
 test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES)
 	$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
 
-.PHONY: valgrind-test-art-host-gtest-$$(art_gtest_name)
-valgrind-test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES)
-	$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
   # Clear now unused variables.
   ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES :=
-  ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
   art_gtest_name :=
 endef  # define-art-gtest-host-both
 
@@ -750,12 +644,11 @@
 $(foreach file, $(ART_TARGET_GTEST_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(patsubst %/,%,$$(dir $$(file))))_$$(notdir $$(basename $$(file)))))
 COMPILER_TARGET_GTEST_MAKE_TARGETS :=
 
-# Define all the combinations of host/target, valgrind and suffix such as:
-# test-art-host-gtest or valgrind-test-art-host-gtest64
+# Define all the combinations of host/target and suffix such as:
+# test-art-host-gtest or test-art-host-gtest64
 # $(1): host or target
 # $(2): HOST or TARGET
-# $(3): valgrind- or undefined
-# $(4): undefined, 32 or 64
+# $(3): undefined, 32 or 64
 define define-test-art-gtest-combination
   ifeq ($(1),host)
     ifneq ($(2),HOST)
@@ -770,12 +663,8 @@
     endif
   endif
 
-  rule_name := $(3)test-art-$(1)-gtest$(4)
-  ifeq ($(3),valgrind-)
-    dependencies := $$(ART_TEST_$(2)_VALGRIND_GTEST$(4)_RULES)
-  else
-    dependencies := $$(ART_TEST_$(2)_GTEST$(4)_RULES)
-  endif
+  rule_name := test-art-$(1)-gtest$(3)
+  dependencies := $$(ART_TEST_$(2)_GTEST$(3)_RULES)
 
 .PHONY: $$(rule_name)
 $$(rule_name): $$(dependencies) dx d8-compat-dx desugar
@@ -786,21 +675,15 @@
   dependencies :=
 endef  # define-test-art-gtest-combination
 
-$(eval $(call define-test-art-gtest-combination,target,TARGET,,))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,,$(ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,$(ART_PHONY_TEST_TARGET_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,target,TARGET,))
+$(eval $(call define-test-art-gtest-combination,target,TARGET,$(ART_PHONY_TEST_TARGET_SUFFIX)))
 ifdef 2ND_ART_PHONY_TEST_TARGET_SUFFIX
-$(eval $(call define-test-art-gtest-combination,target,TARGET,,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,target,TARGET,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
 endif
-$(eval $(call define-test-art-gtest-combination,host,HOST,,))
-$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,))
-$(eval $(call define-test-art-gtest-combination,host,HOST,,$(ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(ART_PHONY_TEST_HOST_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,host,HOST,))
+$(eval $(call define-test-art-gtest-combination,host,HOST,$(ART_PHONY_TEST_HOST_SUFFIX)))
 ifneq ($(HOST_PREFER_32_BIT),true)
-$(eval $(call define-test-art-gtest-combination,host,HOST,,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,host,HOST,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
 endif
 
 # Clear locally defined variables.
@@ -817,15 +700,9 @@
 ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
 ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
 ART_TEST_HOST_GTEST_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST_RULES :=
 ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST_RULES :=
 ART_GTEST_TARGET_ANDROID_ROOT :=
 ART_GTEST_class_linker_test_DEX_DEPS :=
 ART_GTEST_class_table_test_DEX_DEPS :=
@@ -864,8 +741,6 @@
 ART_GTEST_dex2oat_environment_tests_DEX_DEPS :=
 ART_GTEST_heap_verification_test_DEX_DEPS :=
 ART_GTEST_verifier_deps_test_DEX_DEPS :=
-ART_VALGRIND_DEPENDENCIES :=
-ART_VALGRIND_TARGET_DEPENDENCIES :=
 $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=))
 $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=))
 ART_TEST_HOST_GTEST_MainStripped_DEX :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 517ac5c..ba3ef05 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -37,11 +37,9 @@
 endif
 
 # Use dex2oat debug version for better error reporting
-# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
 # $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# $(3): wrapper, e.g., valgrind.
-# $(4): dex2oat suffix, e.g, valgrind requires 32 right now.
-# $(5): multi-image.
+# $(3): multi-image.
 # NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
 # run-test --no-image
 define create-core-oat-host-rules
@@ -65,11 +63,11 @@
   endif
   ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),)
    # Technically this test is not precise, but hopefully good enough.
-    $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing)
+    $$(error found $(1) expected interpreter, interp-ac, or optimizing)
   endif
 
-  # If $(5) is true, generate a multi-image.
-  ifeq ($(5),true)
+  # If $(3) is true, generate a multi-image.
+  ifeq ($(3),true)
     core_multi_infix := -multi
     core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar
     core_multi_group := _multi
@@ -79,22 +77,18 @@
     core_multi_group :=
   endif
 
-  core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(3)$(CORE_IMG_SUFFIX)
-  core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(3)$(CORE_OAT_SUFFIX)
+  core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_IMG_SUFFIX)
+  core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_OAT_SUFFIX)
 
   # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
   ifeq ($(2),)
-    $(3)HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name)
+    HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name)
   else
-    $(3)HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name)
+    HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name)
   endif
-  $(3)HOST_CORE_IMG_OUTS += $$(core_image_name)
-  $(3)HOST_CORE_OAT_OUTS += $$(core_oat_name)
+  HOST_CORE_IMG_OUTS += $$(core_image_name)
+  HOST_CORE_OAT_OUTS += $$(core_oat_name)
 
-  # If we have a wrapper, make the target phony.
-  ifneq ($(3),)
-.PHONY: $$(core_image_name)
-  endif
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
@@ -102,7 +96,7 @@
 $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
 	@echo "host dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
-	$$(hide) $(3) $$(DEX2OAT)$(4) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
 	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
 	  $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
@@ -124,35 +118,27 @@
   core_infix :=
 endef  # create-core-oat-host-rules
 
-# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
-# $(2): wrapper.
-# $(3): dex2oat suffix.
-# $(4): multi-image.
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
+# $(2): multi-image.
 define create-core-oat-host-rule-combination
-  $(call create-core-oat-host-rules,$(1),,$(2),$(3),$(4))
+  $(call create-core-oat-host-rules,$(1),,$(2))
 
   ifneq ($(HOST_PREFER_32_BIT),true)
-    $(call create-core-oat-host-rules,$(1),2ND_,$(2),$(3),$(4))
+    $(call create-core-oat-host-rules,$(1),2ND_,$(2))
   endif
 endef
 
-$(eval $(call create-core-oat-host-rule-combination,optimizing,,,false))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,,,false))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false))
-$(eval $(call create-core-oat-host-rule-combination,optimizing,,,true))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,,,true))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true))
-
-valgrindHOST_CORE_IMG_OUTS :=
-valgrindHOST_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false))
-
-valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
+$(eval $(call create-core-oat-host-rule-combination,optimizing,false))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,false))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,false))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,true))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,true))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,true))
 
 test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS)
 
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
+# $(2): 2ND_ or undefined
 define create-core-oat-target-rules
   core_compile_options :=
   core_image_name :=
@@ -176,36 +162,32 @@
   endif
   ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),)
     # Technically this test is not precise, but hopefully good enough.
-    $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing)
+    $$(error found $(1) expected interpreter, interp-ac, or optimizing)
   endif
 
-  core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(3)$(CORE_IMG_SUFFIX)
-  core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(3)$(CORE_OAT_SUFFIX)
+  core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX)
+  core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX)
 
   # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
   ifeq ($(2),)
     ifdef TARGET_2ND_ARCH
-      $(3)TARGET_CORE_IMAGE_$(1)_64 := $$(core_image_name)
+      TARGET_CORE_IMAGE_$(1)_64 := $$(core_image_name)
     else
-      $(3)TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
+      TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
     endif
   else
-    $(3)TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
+    TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
   endif
-  $(3)TARGET_CORE_IMG_OUTS += $$(core_image_name)
-  $(3)TARGET_CORE_OAT_OUTS += $$(core_oat_name)
+  TARGET_CORE_IMG_OUTS += $$(core_image_name)
+  TARGET_CORE_OAT_OUTS += $$(core_oat_name)
 
-  # If we have a wrapper, make the target phony.
-  ifneq ($(3),)
-.PHONY: $$(core_image_name)
-  endif
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
 $$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
 	@echo "target dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
-	$$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
 	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
 	  $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
@@ -228,30 +210,18 @@
   core_infix :=
 endef  # create-core-oat-target-rules
 
-# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
-# $(2): wrapper.
-# $(3): dex2oat suffix.
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
 define create-core-oat-target-rule-combination
-  $(call create-core-oat-target-rules,$(1),,$(2),$(3))
+  $(call create-core-oat-target-rules,$(1),)
 
   ifdef TARGET_2ND_ARCH
-    $(call create-core-oat-target-rules,$(1),2ND_,$(2),$(3))
+    $(call create-core-oat-target-rules,$(1),2ND_)
   endif
 endef
 
-$(eval $(call create-core-oat-target-rule-combination,optimizing,,))
-$(eval $(call create-core-oat-target-rule-combination,interpreter,,))
-$(eval $(call create-core-oat-target-rule-combination,interp-ac,,))
-
-valgrindTARGET_CORE_IMG_OUTS :=
-valgrindTARGET_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
-$(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
-$(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32))
-
-valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
-
-valgrind-test-art-host-dex2oat: valgrind-test-art-host-dex2oat-host valgrind-test-art-host-dex2oat-target
+$(eval $(call create-core-oat-target-rule-combination,optimizing))
+$(eval $(call create-core-oat-target-rule-combination,interpreter))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac))
 
 # Define a default core image that can be used for things like gtests that
 # need some image to run, but don't otherwise care which image is used.
diff --git a/cmdline/unit.h b/cmdline/unit.h
index ad6a03d..f73981f 100644
--- a/cmdline/unit.h
+++ b/cmdline/unit.h
@@ -21,8 +21,9 @@
 
 // Used for arguments that simply indicate presence (e.g. "-help") without any values.
 struct Unit {
-  // Avoid 'Conditional jump or move depends on uninitialised value(s)' errors
-  // when running valgrind by specifying a user-defined constructor.
+  // Historical note: We specified a user-defined constructor to avoid
+  // 'Conditional jump or move depends on uninitialised value(s)' errors
+  // when running Valgrind.
   Unit() {}
   Unit(const Unit&) = default;
   ~Unit() {}
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 893cad2..87e679f 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -207,11 +207,10 @@
       std::vector<DexRegisterMap> dex_reg_maps;
       if (accessor.HasCodeItem() && mi->code_info != nullptr) {
         const CodeInfo code_info(mi->code_info);
-        CodeInfoEncoding encoding = code_info.ExtractEncoding();
-        for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) {
-          const StackMap& stack_map = code_info.GetStackMapAt(s, encoding);
+        for (size_t s = 0; s < code_info.GetNumberOfStackMaps(); ++s) {
+          const StackMap stack_map = code_info.GetStackMapAt(s);
           dex_reg_maps.push_back(code_info.GetDexRegisterMapOf(
-              stack_map, encoding, accessor.RegistersSize()));
+              stack_map, accessor.RegistersSize()));
         }
       }
 
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 44504c1..a7adab5 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -100,15 +100,14 @@
       if (mi->code_info != nullptr) {
         // Use stack maps to create mapping table from pc to dex.
         const CodeInfo code_info(mi->code_info);
-        const CodeInfoEncoding encoding = code_info.ExtractEncoding();
-        pc2dex_map.reserve(code_info.GetNumberOfStackMaps(encoding));
-        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
-          StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+        pc2dex_map.reserve(code_info.GetNumberOfStackMaps());
+        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+          StackMap stack_map = code_info.GetStackMapAt(s);
           DCHECK(stack_map.IsValid());
-          const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
-          const int32_t dex = stack_map.GetDexPc(encoding.stack_map.encoding);
+          const uint32_t pc = stack_map.GetNativePcOffset(isa);
+          const int32_t dex = stack_map.GetDexPc();
           pc2dex_map.push_back({pc, dex});
-          if (stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+          if (stack_map.HasDexRegisterMap()) {
             // Guess that the first map with local variables is the end of prologue.
             prologue_end = std::min(prologue_end, pc);
           }
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 9ea9f01..c1bf915 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -99,12 +99,11 @@
   // Get stack maps sorted by pc (they might not be sorted internally).
   // TODO(dsrbecky) Remove this once stackmaps get sorted by pc.
   const CodeInfo code_info(method_info->code_info);
-  const CodeInfoEncoding encoding = code_info.ExtractEncoding();
   std::map<uint32_t, uint32_t> stack_maps;  // low_pc -> stack_map_index.
-  for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
-    StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+  for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+    StackMap stack_map = code_info.GetStackMapAt(s);
     DCHECK(stack_map.IsValid());
-    if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+    if (!stack_map.HasDexRegisterMap()) {
       // The compiler creates stackmaps without register maps at the start of
       // basic blocks in order to keep instruction-accurate line number mapping.
       // However, we never stop at those (breakpoint locations always have map).
@@ -112,7 +111,7 @@
       // The main reason for this is to save space by avoiding undefined gaps.
       continue;
     }
-    const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
+    const uint32_t pc_offset = stack_map.GetNativePcOffset(isa);
     DCHECK_LE(pc_offset, method_info->code_size);
     DCHECK_LE(compilation_unit_code_address, method_info->code_address);
     const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
@@ -124,7 +123,7 @@
   for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) {
     const uint32_t low_pc = it->first;
     const uint32_t stack_map_index = it->second;
-    const StackMap& stack_map = code_info.GetStackMapAt(stack_map_index, encoding);
+    const StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
     auto next_it = it;
     next_it++;
     const uint32_t high_pc = next_it != stack_maps.end()
@@ -136,7 +135,7 @@
     }
 
     // Check that the stack map is in the requested range.
-    uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
+    uint32_t dex_pc = stack_map.GetDexPc();
     if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
       // The variable is not in scope at this PC. Therefore omit the entry.
       // Note that this is different to None() entry which means in scope, but unknown location.
@@ -151,10 +150,10 @@
     DCHECK(dex_register_map.IsValid());
     CodeItemDataAccessor accessor(*method_info->dex_file, method_info->code_item);
     reg_lo = dex_register_map.GetDexRegisterLocation(
-        vreg, accessor.RegistersSize(), code_info, encoding);
+        vreg, accessor.RegistersSize(), code_info);
     if (is64bitValue) {
       reg_hi = dex_register_map.GetDexRegisterLocation(
-          vreg + 1, accessor.RegistersSize(), code_info, encoding);
+          vreg + 1, accessor.RegistersSize(), code_info);
     }
 
     // Add location entry for this address range.
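Note: with the CodeInfoEncoding side table removed, the three debug writers above all follow the same simplified pattern: stack-map accessors are called directly on CodeInfo/StackMap with no encoding threaded through. A minimal sketch of the new iteration shape, using only calls that appear in this diff (mi, isa, and accessor are the surrounding locals from the hunks above):

    // Encoding-free stack map iteration (sketch, per the updated API).
    const CodeInfo code_info(mi->code_info);
    for (size_t s = 0; s < code_info.GetNumberOfStackMaps(); ++s) {
      const StackMap stack_map = code_info.GetStackMapAt(s);  // returned by value now
      const uint32_t native_pc = stack_map.GetNativePcOffset(isa);
      const int32_t dex_pc = stack_map.GetDexPc();
      if (stack_map.HasDexRegisterMap()) {
        // Register maps are likewise fetched without an encoding argument.
        DexRegisterMap map =
            code_info.GetDexRegisterMapOf(stack_map, accessor.RegistersSize());
      }
    }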
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index dc044c1..fe8b766 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -724,7 +724,8 @@
     return false;
   }
   DCHECK_GE(field->GetOffset().Int32Value(), 0);
-  // Do not interleave function calls with bit field writes to placate valgrind. Bug: 27552451.
+  // Historical note: We made sure not to interleave function calls with bit field writes to
+  // placate Valgrind. Bug: 27552451.
   uint32_t field_offset = field->GetOffset().Uint32Value();
   bool is_volatile = field->IsVolatile();
   result->field_idx = field_idx;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index fb556f4..de1be5b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -975,11 +975,10 @@
                         const CodeInfo& code_info,
                         const ArenaVector<HSuspendCheck*>& loop_headers,
                         ArenaVector<size_t>* covered) {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
   for (size_t i = 0; i < loop_headers.size(); ++i) {
     if (loop_headers[i]->GetDexPc() == dex_pc) {
       if (graph.IsCompilingOsr()) {
-        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
+        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
       }
       ++(*covered)[i];
     }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index bcb2599..a340446 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -35,7 +35,6 @@
 #include "optimizing_compiler_stats.h"
 #include "read_barrier_option.h"
 #include "stack.h"
-#include "stack_map.h"
 #include "utils/label.h"
 
 namespace art {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index aa343b1..e7fe5b7 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -17,7 +17,6 @@
 #ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
 
-#include "arch/arm64/quick_method_frame_info_arm64.h"
 #include "base/bit_field.h"
 #include "code_generator.h"
 #include "common_arm64.h"
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 42031f9..24dc2ee 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -448,11 +448,9 @@
       invoke_type,
       target_method,
       HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+  RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
   HandleInvoke(invoke,
-               in_vregs,
-               /* args */ nullptr,
-               graph_->GetNumberOfVRegs() - in_vregs,
-               /* is_range */ true,
+               operands,
                dex_file_->GetMethodShorty(method_idx),
                /* clinit_check */ nullptr,
                /* is_unresolved */ false);
@@ -916,10 +914,7 @@
 bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
                                       uint32_t dex_pc,
                                       uint32_t method_idx,
-                                      uint32_t number_of_vreg_arguments,
-                                      bool is_range,
-                                      uint32_t* args,
-                                      uint32_t register_index) {
+                                      const InstructionOperands& operands) {
   InvokeType invoke_type = GetInvokeTypeFromOpCode(instruction.Opcode());
   const char* descriptor = dex_file_->GetMethodShorty(method_idx);
   DataType::Type return_type = DataType::FromShorty(descriptor[0]);
@@ -943,12 +938,9 @@
                                                          method_idx,
                                                          invoke_type);
     return HandleInvoke(invoke,
-                        number_of_vreg_arguments,
-                        args,
-                        register_index,
-                        is_range,
+                        operands,
                         descriptor,
-                        nullptr, /* clinit_check */
+                        nullptr /* clinit_check */,
                         true /* is_unresolved */);
   }
 
@@ -976,12 +968,7 @@
         invoke_type,
         target_method,
         HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
-    return HandleStringInit(invoke,
-                            number_of_vreg_arguments,
-                            args,
-                            register_index,
-                            is_range,
-                            descriptor);
+    return HandleStringInit(invoke, operands, descriptor);
   }
 
   // Potential class initialization check, in the case of a static method call.
@@ -1042,26 +1029,16 @@
                                                ImTable::GetImtIndex(resolved_method));
   }
 
-  return HandleInvoke(invoke,
-                      number_of_vreg_arguments,
-                      args,
-                      register_index,
-                      is_range,
-                      descriptor,
-                      clinit_check,
-                      false /* is_unresolved */);
+  return HandleInvoke(invoke, operands, descriptor, clinit_check, false /* is_unresolved */);
 }
 
 bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction ATTRIBUTE_UNUSED,
                                                  uint32_t dex_pc,
                                                  uint32_t method_idx,
                                                  dex::ProtoIndex proto_idx,
-                                                 uint32_t number_of_vreg_arguments,
-                                                 bool is_range,
-                                                 uint32_t* args,
-                                                 uint32_t register_index) {
+                                                 const InstructionOperands& operands) {
   const char* descriptor = dex_file_->GetShorty(proto_idx);
-  DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
+  DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), operands.GetNumberOfOperands());
   DataType::Type return_type = DataType::FromShorty(descriptor[0]);
   size_t number_of_arguments = strlen(descriptor);
   HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
@@ -1070,10 +1047,7 @@
                                                         dex_pc,
                                                         method_idx);
   return HandleInvoke(invoke,
-                      number_of_vreg_arguments,
-                      args,
-                      register_index,
-                      is_range,
+                      operands,
                       descriptor,
                       nullptr /* clinit_check */,
                       false /* is_unresolved */);
@@ -1222,26 +1196,22 @@
 }
 
 bool HInstructionBuilder::SetupInvokeArguments(HInvoke* invoke,
-                                               uint32_t number_of_vreg_arguments,
-                                               uint32_t* args,
-                                               uint32_t register_index,
-                                               bool is_range,
+                                               const InstructionOperands& operands,
                                                const char* descriptor,
                                                size_t start_index,
                                                size_t* argument_index) {
   uint32_t descriptor_index = 1;  // Skip the return type.
-
+  const size_t number_of_operands = operands.GetNumberOfOperands();
   for (size_t i = start_index;
        // Make sure we don't go over the expected arguments or over the number of
        // dex registers given. If the instruction was seen as dead by the verifier,
        // it hasn't been properly checked.
-       (i < number_of_vreg_arguments) && (*argument_index < invoke->GetNumberOfArguments());
+       (i < number_of_operands) && (*argument_index < invoke->GetNumberOfArguments());
        i++, (*argument_index)++) {
     DataType::Type type = DataType::FromShorty(descriptor[descriptor_index++]);
     bool is_wide = (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
-    if (!is_range
-        && is_wide
-        && ((i + 1 == number_of_vreg_arguments) || (args[i] + 1 != args[i + 1]))) {
+    if (is_wide && ((i + 1 == number_of_operands) ||
+                    (operands.GetOperand(i) + 1 != operands.GetOperand(i + 1)))) {
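+      // For a range invoke, GetOperand(i) + 1 == GetOperand(i + 1) holds by
+      // construction, so for range forms this bailout can only catch a wide
+      // argument truncated at the end of the range; the old !is_range guard
+      // is dropped, which also covers that malformed case.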
       // Longs and doubles should be in pairs, that is, sequential registers. The verifier should
       // reject any class where this is violated. However, the verifier only does these checks
       // on non trivially dead instructions, so we just bailout the compilation.
@@ -1252,7 +1222,7 @@
                       MethodCompilationStat::kNotCompiledMalformedOpcode);
       return false;
     }
-    HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type);
+    HInstruction* arg = LoadLocal(operands.GetOperand(i), type);
     invoke->SetArgumentAt(*argument_index, arg);
     if (is_wide) {
       i++;
@@ -1279,10 +1249,7 @@
 }
 
 bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
-                                       uint32_t number_of_vreg_arguments,
-                                       uint32_t* args,
-                                       uint32_t register_index,
-                                       bool is_range,
+                                       const InstructionOperands& operands,
                                        const char* descriptor,
                                        HClinitCheck* clinit_check,
                                        bool is_unresolved) {
@@ -1291,7 +1258,7 @@
   size_t start_index = 0;
   size_t argument_index = 0;
   if (invoke->GetInvokeType() != InvokeType::kStatic) {  // Instance call.
-    uint32_t obj_reg = is_range ? register_index : args[0];
+    uint32_t obj_reg = operands.GetOperand(0);
     HInstruction* arg = is_unresolved
         ? LoadLocal(obj_reg, DataType::Type::kReference)
         : LoadNullCheckedLocal(obj_reg, invoke->GetDexPc());
@@ -1300,14 +1267,7 @@
     argument_index = 1;
   }
 
-  if (!SetupInvokeArguments(invoke,
-                            number_of_vreg_arguments,
-                            args,
-                            register_index,
-                            is_range,
-                            descriptor,
-                            start_index,
-                            &argument_index)) {
+  if (!SetupInvokeArguments(invoke, operands, descriptor, start_index, &argument_index)) {
     return false;
   }
 
@@ -1327,24 +1287,14 @@
 }
 
 bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
-                                           uint32_t number_of_vreg_arguments,
-                                           uint32_t* args,
-                                           uint32_t register_index,
-                                           bool is_range,
+                                           const InstructionOperands& operands,
                                            const char* descriptor) {
   DCHECK(invoke->IsInvokeStaticOrDirect());
   DCHECK(invoke->AsInvokeStaticOrDirect()->IsStringInit());
 
   size_t start_index = 1;
   size_t argument_index = 0;
-  if (!SetupInvokeArguments(invoke,
-                            number_of_vreg_arguments,
-                            args,
-                            register_index,
-                            is_range,
-                            descriptor,
-                            start_index,
-                            &argument_index)) {
+  if (!SetupInvokeArguments(invoke, operands, descriptor, start_index, &argument_index)) {
     return false;
   }
 
@@ -1352,7 +1302,7 @@
 
   // This is a StringFactory call, not an actual String constructor. Its result
   // replaces the empty String pre-allocated by NewInstance.
-  uint32_t orig_this_reg = is_range ? register_index : args[0];
+  uint32_t orig_this_reg = operands.GetOperand(0);
   HInstruction* arg_this = LoadLocal(orig_this_reg, DataType::Type::kReference);
 
   // Replacing the NewInstance might render it redundant. Keep a list of these
@@ -1705,11 +1655,9 @@
 
 HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
                                                     dex::TypeIndex type_index,
-                                                    uint32_t number_of_vreg_arguments,
-                                                    bool is_range,
-                                                    uint32_t* args,
-                                                    uint32_t register_index) {
-  HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
+                                                    const InstructionOperands& operands) {
+  const size_t number_of_operands = operands.GetNumberOfOperands();
+  HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
   HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
   HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
   AppendInstruction(object);
@@ -1723,8 +1671,8 @@
   bool is_reference_array = (primitive == 'L') || (primitive == '[');
   DataType::Type type = is_reference_array ? DataType::Type::kReference : DataType::Type::kInt32;
 
-  for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
-    HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
+  for (size_t i = 0; i < number_of_operands; ++i) {
+    HInstruction* value = LoadLocal(operands.GetOperand(i), type);
     HInstruction* index = graph_->GetIntConstant(i, dex_pc);
     HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
     ssa_builder_->MaybeAddAmbiguousArraySet(aset);
@@ -2157,11 +2105,10 @@
       } else {
         method_idx = instruction.VRegB_35c();
       }
-      uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
-      instruction.GetVarArgs(args);
-      if (!BuildInvoke(instruction, dex_pc, method_idx,
-                       number_of_vreg_arguments, false, args, -1)) {
+      uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+      VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+      if (!BuildInvoke(instruction, dex_pc, method_idx, operands)) {
         return false;
       }
       break;
@@ -2184,10 +2131,8 @@
       } else {
         method_idx = instruction.VRegB_3rc();
       }
-      uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
-      uint32_t register_index = instruction.VRegC();
-      if (!BuildInvoke(instruction, dex_pc, method_idx,
-                       number_of_vreg_arguments, true, nullptr, register_index)) {
+      RangeInstructionOperands operands(instruction.VRegC(), instruction.VRegA_3rc());
+      if (!BuildInvoke(instruction, dex_pc, method_idx, operands)) {
         return false;
       }
       break;
@@ -2196,32 +2141,17 @@
     case Instruction::INVOKE_POLYMORPHIC: {
       uint16_t method_idx = instruction.VRegB_45cc();
       dex::ProtoIndex proto_idx(instruction.VRegH_45cc());
-      uint32_t number_of_vreg_arguments = instruction.VRegA_45cc();
       uint32_t args[5];
-      instruction.GetVarArgs(args);
-      return BuildInvokePolymorphic(instruction,
-                                    dex_pc,
-                                    method_idx,
-                                    proto_idx,
-                                    number_of_vreg_arguments,
-                                    false,
-                                    args,
-                                    -1);
+      uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+      VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+      return BuildInvokePolymorphic(instruction, dex_pc, method_idx, proto_idx, operands);
     }
 
     case Instruction::INVOKE_POLYMORPHIC_RANGE: {
       uint16_t method_idx = instruction.VRegB_4rcc();
       dex::ProtoIndex proto_idx(instruction.VRegH_4rcc());
-      uint32_t number_of_vreg_arguments = instruction.VRegA_4rcc();
-      uint32_t register_index = instruction.VRegC_4rcc();
-      return BuildInvokePolymorphic(instruction,
-                                    dex_pc,
-                                    method_idx,
-                                    proto_idx,
-                                    number_of_vreg_arguments,
-                                    true,
-                                    nullptr,
-                                    register_index);
+      RangeInstructionOperands operands(instruction.VRegC_4rcc(), instruction.VRegA_4rcc());
+      return BuildInvokePolymorphic(instruction, dex_pc, method_idx, proto_idx, operands);
     }
 
     case Instruction::NEG_INT: {
@@ -2769,30 +2699,19 @@
     }
 
     case Instruction::FILLED_NEW_ARRAY: {
-      uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       dex::TypeIndex type_index(instruction.VRegB_35c());
       uint32_t args[5];
-      instruction.GetVarArgs(args);
-      HNewArray* new_array = BuildFilledNewArray(dex_pc,
-                                                 type_index,
-                                                 number_of_vreg_arguments,
-                                                 /* is_range */ false,
-                                                 args,
-                                                 /* register_index */ 0);
+      uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+      VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+      HNewArray* new_array = BuildFilledNewArray(dex_pc, type_index, operands);
       BuildConstructorFenceForAllocation(new_array);
       break;
     }
 
     case Instruction::FILLED_NEW_ARRAY_RANGE: {
-      uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       dex::TypeIndex type_index(instruction.VRegB_3rc());
-      uint32_t register_index = instruction.VRegC_3rc();
-      HNewArray* new_array = BuildFilledNewArray(dex_pc,
-                                                 type_index,
-                                                 number_of_vreg_arguments,
-                                                 /* is_range */ true,
-                                                 /* args*/ nullptr,
-                                                 register_index);
+      RangeInstructionOperands operands(instruction.VRegC_3rc(), instruction.VRegA_3rc());
+      HNewArray* new_array = BuildFilledNewArray(dex_pc, type_index, operands);
       BuildConstructorFenceForAllocation(new_array);
       break;
     }
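Note: the RangeInstructionOperands / VarArgsInstructionOperands classes used above are introduced elsewhere in this change and are not shown in this diff. A minimal sketch of the interface implied by the call sites (a hand-written approximation, not the actual ART definitions):

    // Hypothetical sketch: uniform view over invoke/filled-new-array operands.
    class InstructionOperands {
     public:
      explicit InstructionOperands(size_t num_operands) : num_operands_(num_operands) {}
      virtual ~InstructionOperands() {}
      virtual uint32_t GetOperand(size_t index) const = 0;
      size_t GetNumberOfOperands() const { return num_operands_; }
     private:
      const size_t num_operands_;
    };

    // Range forms (invoke-*/range, filled-new-array/range): num_operands
    // consecutive vregs starting at first_operand.
    class RangeInstructionOperands : public InstructionOperands {
     public:
      RangeInstructionOperands(uint32_t first_operand, size_t num_operands)
          : InstructionOperands(num_operands), first_operand_(first_operand) {}
      uint32_t GetOperand(size_t index) const override { return first_operand_ + index; }
     private:
      const uint32_t first_operand_;
    };

    // 35c-style var-args forms: operands come from an explicit register list.
    class VarArgsInstructionOperands : public InstructionOperands {
     public:
      VarArgsInstructionOperands(const uint32_t* operands, size_t num_operands)
          : InstructionOperands(num_operands), operands_(operands) {}
      uint32_t GetOperand(size_t index) const override { return operands_[index]; }
     private:
      const uint32_t* const operands_;
    };

With either shape, operands.GetOperand(0) is the receiver vreg for instance calls, which is what HandleInvoke and HandleStringInit rely on above.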
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 9d886a8..2218a69 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -38,6 +38,7 @@
 class DexCompilationUnit;
 class HBasicBlockBuilder;
 class Instruction;
+class InstructionOperands;
 class OptimizingCompilerStats;
 class ScopedObjectAccess;
 class SsaBuilder;
@@ -168,10 +169,7 @@
   bool BuildInvoke(const Instruction& instruction,
                    uint32_t dex_pc,
                    uint32_t method_idx,
-                   uint32_t number_of_vreg_arguments,
-                   bool is_range,
-                   uint32_t* args,
-                   uint32_t register_index);
+                   const InstructionOperands& operands);
 
   // Builds an invocation node for invoke-polymorphic and returns whether the
   // instruction is supported.
@@ -179,18 +177,12 @@
                               uint32_t dex_pc,
                               uint32_t method_idx,
                               dex::ProtoIndex proto_idx,
-                              uint32_t number_of_vreg_arguments,
-                              bool is_range,
-                              uint32_t* args,
-                              uint32_t register_index);
+                              const InstructionOperands& operands);
 
   // Builds a new array node and the instructions that fill it.
   HNewArray* BuildFilledNewArray(uint32_t dex_pc,
                                  dex::TypeIndex type_index,
-                                 uint32_t number_of_vreg_arguments,
-                                 bool is_range,
-                                 uint32_t* args,
-                                 uint32_t register_index);
+                                 const InstructionOperands& operands);
 
   void BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc);
 
@@ -260,28 +252,19 @@
                                      HInvoke* invoke);
 
   bool SetupInvokeArguments(HInvoke* invoke,
-                            uint32_t number_of_vreg_arguments,
-                            uint32_t* args,
-                            uint32_t register_index,
-                            bool is_range,
+                            const InstructionOperands& operands,
                             const char* descriptor,
                             size_t start_index,
                             size_t* argument_index);
 
   bool HandleInvoke(HInvoke* invoke,
-                    uint32_t number_of_vreg_arguments,
-                    uint32_t* args,
-                    uint32_t register_index,
-                    bool is_range,
+                    const InstructionOperands& operands,
                     const char* descriptor,
                     HClinitCheck* clinit_check,
                     bool is_unresolved);
 
   bool HandleStringInit(HInvoke* invoke,
-                        uint32_t number_of_vreg_arguments,
-                        uint32_t* args,
-                        uint32_t register_index,
-                        bool is_range,
+                        const InstructionOperands& operands,
                         const char* descriptor);
   void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index ca84d42..6e618f4 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -636,8 +636,8 @@
     return;
   }
 
-  // Note: The `outcome` is initialized to please valgrind - the compiler can reorder
-  // the return value check with the `outcome` check, b/27651442 .
+  // Historical note: The `outcome` was initialized to please Valgrind - the compiler can reorder
+  // the return value check with the `outcome` check, b/27651442.
   bool outcome = false;
   if (TypeCheckHasKnownOutcome(check_cast->GetTargetClassRTI(), object, &outcome)) {
     if (outcome) {
@@ -682,8 +682,8 @@
     return;
   }
 
-  // Note: The `outcome` is initialized to please valgrind - the compiler can reorder
-  // the return value check with the `outcome` check, b/27651442 .
+  // Historical note: The `outcome` was initialized to please Valgrind - the compiler can reorder
+  // the return value check with the `outcome` check, b/27651442.
   bool outcome = false;
   if (TypeCheckHasKnownOutcome(instruction->GetTargetClassRTI(), object, &outcome)) {
     MaybeRecordStat(stats_, MethodCompilationStat::kRemovedInstanceOf);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 35e64f7..28ac942 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -458,8 +458,13 @@
       }
       if (from_all_predecessors) {
         if (ref_info->IsSingletonAndRemovable() &&
-            block->IsSingleReturnOrReturnVoidAllowingPhis()) {
-          // Values in the singleton are not needed anymore.
+            (block->IsSingleReturnOrReturnVoidAllowingPhis() ||
+             (block->EndsWithReturn() && (merged_value != kUnknownHeapValue ||
+                                          merged_store_value != kUnknownHeapValue)))) {
+          // Values in the singleton are not needed anymore:
+          // (1) if this block consists of a sole return, or
+          // (2) if this block returns and a usable merged value is obtained
+          //     (loads prior to the return will always use that value).
         } else if (!IsStore(merged_value)) {
           // We don't track merged value as a store anymore. We have to
           // hold the stores in predecessors live here.
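Note: decomposed for readability, the new removability condition from this hunk is (a sketch using the hunk's own names):

    // Values stored into a removable singleton are dead at block exit if
    // either the block is a sole return, or it returns and the merge
    // produced a usable value (so later loads never reach the stores).
    const bool sole_return = block->IsSingleReturnOrReturnVoidAllowingPhis();
    const bool returns_with_usable_merge = block->EndsWithReturn() &&
        (merged_value != kUnknownHeapValue || merged_store_value != kUnknownHeapValue);
    const bool singleton_values_dead =
        ref_info->IsSingletonAndRemovable() && (sole_return || returns_with_usable_merge);

EndsWithReturn() is the new HBasicBlock helper added in nodes.cc/nodes.h below.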
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 5f2833e..7f78dc2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1951,6 +1951,11 @@
   return !GetInstructions().IsEmpty() && GetLastInstruction()->IsControlFlow();
 }
 
+bool HBasicBlock::EndsWithReturn() const {
+  return !GetInstructions().IsEmpty() &&
+      (GetLastInstruction()->IsReturn() || GetLastInstruction()->IsReturnVoid());
+}
+
 bool HBasicBlock::EndsWithIf() const {
   return !GetInstructions().IsEmpty() && GetLastInstruction()->IsIf();
 }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e786502..09d9c57 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1285,6 +1285,7 @@
   void SetLifetimeEnd(size_t end) { lifetime_end_ = end; }
 
   bool EndsWithControlFlowInstruction() const;
+  bool EndsWithReturn() const;
   bool EndsWithIf() const;
   bool EndsWithTryBoundary() const;
   bool HasSinglePhi() const;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 7010e3f..aa28c8b 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -51,15 +51,7 @@
   if (sp_mask != nullptr) {
     stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
   }
-  if (inlining_depth > 0) {
-    number_of_stack_maps_with_inline_info_++;
-  }
 
-  // Note: dex_pc can be kNoDexPc for native method intrinsics.
-  if (dex_pc != dex::kDexNoIndex && (dex_pc_max_ == dex::kDexNoIndex || dex_pc_max_ < dex_pc)) {
-    dex_pc_max_ = dex_pc;
-  }
-  register_mask_max_ = std::max(register_mask_max_, register_mask);
   current_dex_register_ = 0;
 }
 
@@ -146,51 +138,6 @@
   current_inline_info_ = InlineInfoEntry();
 }
 
-CodeOffset StackMapStream::ComputeMaxNativePcCodeOffset() const {
-  CodeOffset max_native_pc_offset;
-  for (const StackMapEntry& entry : stack_maps_) {
-    max_native_pc_offset = std::max(max_native_pc_offset, entry.native_pc_code_offset);
-  }
-  return max_native_pc_offset;
-}
-
-size_t StackMapStream::PrepareForFillIn() {
-  CodeInfoEncoding encoding;
-  encoding.dex_register_map.num_entries = 0;  // TODO: Remove this field.
-  encoding.dex_register_map.num_bytes = ComputeDexRegisterMapsSize();
-  encoding.location_catalog.num_entries = location_catalog_entries_.size();
-  encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize();
-  encoding.inline_info.num_entries = inline_infos_.size();
-  // Must be done before calling ComputeInlineInfoEncoding since ComputeInlineInfoEncoding requires
-  // dex_method_index_idx to be filled in.
-  PrepareMethodIndices();
-  ComputeInlineInfoEncoding(&encoding.inline_info.encoding,
-                            encoding.dex_register_map.num_bytes);
-  CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
-  // Prepare the CodeInfo variable-sized encoding.
-  encoding.stack_mask.encoding.num_bits = stack_mask_max_ + 1;  // Need room for max element too.
-  encoding.stack_mask.num_entries = PrepareStackMasks(encoding.stack_mask.encoding.num_bits);
-  encoding.register_mask.encoding.num_bits = MinimumBitsToStore(register_mask_max_);
-  encoding.register_mask.num_entries = PrepareRegisterMasks();
-  encoding.stack_map.num_entries = stack_maps_.size();
-  encoding.stack_map.encoding.SetFromSizes(
-      // The stack map contains compressed native PC offsets.
-      max_native_pc_offset.CompressedValue(),
-      dex_pc_max_,
-      encoding.dex_register_map.num_bytes,
-      encoding.inline_info.num_entries,
-      encoding.register_mask.num_entries,
-      encoding.stack_mask.num_entries);
-  ComputeInvokeInfoEncoding(&encoding);
-  DCHECK_EQ(code_info_encoding_.size(), 0u);
-  encoding.Compress(&code_info_encoding_);
-  encoding.ComputeTableOffsets();
-  // Compute table offsets so we can get the non header size.
-  DCHECK_EQ(encoding.HeaderSize(), code_info_encoding_.size());
-  needed_size_ = code_info_encoding_.size() + encoding.NonHeaderSize();
-  return needed_size_;
-}
-
 size_t StackMapStream::ComputeDexRegisterLocationCatalogSize() const {
   size_t size = DexRegisterLocationCatalog::kFixedSize;
   for (const DexRegisterLocation& dex_register_location : location_catalog_entries_) {
@@ -204,6 +151,10 @@
   if (num_dex_registers == 0u) {
     return 0u;  // No register map will be emitted.
   }
+  DCHECK(live_dex_registers_mask != nullptr);
+  size_t number_of_live_dex_registers = live_dex_registers_mask->NumSetBits();
+  if (number_of_live_dex_registers == 0u) {
+    return 0u;  // No register map will be emitted.
+  }
-  DCHECK(live_dex_registers_mask != nullptr);
 
   // Size of the map in bytes.
@@ -211,7 +162,6 @@
   // Add the live bit mask for the Dex register liveness.
   size += DexRegisterMap::GetLiveBitMaskSize(num_dex_registers);
   // Compute the size of the set of live Dex register entries.
-  size_t number_of_live_dex_registers = live_dex_registers_mask->NumSetBits();
   size_t map_entries_size_in_bits =
       DexRegisterMap::SingleEntrySizeInBits(catalog_size) * number_of_live_dex_registers;
   size_t map_entries_size_in_bytes =
@@ -220,86 +170,6 @@
   return size;
 }
 
-size_t StackMapStream::ComputeDexRegisterMapsSize() const {
-  size_t size = 0;
-  for (const DexRegisterMapEntry& entry : dex_register_entries_) {
-    size += entry.ComputeSize(location_catalog_entries_.size());
-  }
-  return size;
-}
-
-void StackMapStream::ComputeInvokeInfoEncoding(CodeInfoEncoding* encoding) {
-  DCHECK(encoding != nullptr);
-  uint32_t native_pc_max = 0;
-  uint16_t method_index_max = 0;
-  size_t invoke_infos_count = 0;
-  size_t invoke_type_max = 0;
-  for (const StackMapEntry& entry : stack_maps_) {
-    if (entry.dex_method_index != dex::kDexNoIndex) {
-      native_pc_max = std::max(native_pc_max, entry.native_pc_code_offset.CompressedValue());
-      method_index_max = std::max(method_index_max, static_cast<uint16_t>(entry.dex_method_index));
-      invoke_type_max = std::max(invoke_type_max, static_cast<size_t>(entry.invoke_type));
-      ++invoke_infos_count;
-    }
-  }
-  encoding->invoke_info.num_entries = invoke_infos_count;
-  encoding->invoke_info.encoding.SetFromSizes(native_pc_max, invoke_type_max, method_index_max);
-}
-
-void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
-                                               size_t dex_register_maps_bytes) {
-  uint32_t method_index_max = 0;
-  uint32_t dex_pc_max = dex::kDexNoIndex;
-  uint32_t extra_data_max = 0;
-
-  uint32_t inline_info_index = 0;
-  for (const StackMapEntry& entry : stack_maps_) {
-    for (size_t j = 0; j < entry.inlining_depth; ++j) {
-      InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
-      if (inline_entry.method == nullptr) {
-        method_index_max = std::max(method_index_max, inline_entry.dex_method_index_idx);
-        extra_data_max = std::max(extra_data_max, 1u);
-      } else {
-        method_index_max = std::max(
-            method_index_max, High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-        extra_data_max = std::max(
-            extra_data_max, Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-      }
-      if (inline_entry.dex_pc != dex::kDexNoIndex &&
-          (dex_pc_max == dex::kDexNoIndex || dex_pc_max < inline_entry.dex_pc)) {
-        dex_pc_max = inline_entry.dex_pc;
-      }
-    }
-  }
-  DCHECK_EQ(inline_info_index, inline_infos_.size());
-
-  encoding->SetFromSizes(method_index_max, dex_pc_max, extra_data_max, dex_register_maps_bytes);
-}
-
-size_t StackMapStream::MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
-                                               size_t* current_offset,
-                                               MemoryRegion dex_register_locations_region) {
-  DCHECK(current_offset != nullptr);
-  if ((entry.num_dex_registers == 0) || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
-    // No dex register map needed.
-    return StackMap::kNoDexRegisterMap;
-  }
-  if (entry.offset == DexRegisterMapEntry::kOffsetUnassigned) {
-    // Not already copied, need to copy and and assign an offset.
-    entry.offset = *current_offset;
-    const size_t entry_size = entry.ComputeSize(location_catalog_entries_.size());
-    DexRegisterMap dex_register_map(
-        dex_register_locations_region.Subregion(entry.offset, entry_size));
-    *current_offset += entry_size;
-    // Fill in the map since it was just added.
-    FillInDexRegisterMap(dex_register_map,
-                         entry.num_dex_registers,
-                         *entry.live_dex_registers_mask,
-                         entry.locations_start_index);
-  }
-  return entry.offset;
-}
-
 void StackMapStream::FillInMethodInfo(MemoryRegion region) {
   {
     MethodInfo info(region.begin(), method_indices_.size());
@@ -318,30 +188,64 @@
   }
 }
 
-void StackMapStream::FillInCodeInfo(MemoryRegion region) {
-  DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
-  DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
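+// Reserves `bit_length` bits, rounded up to whole bytes, at the end of `out`,
+// preceded by the byte length encoded as a varint. Returns a MemoryRegion
+// over the reserved bytes and leaves `*bit_offset` just past them.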
+template<typename Vector>
+static MemoryRegion EncodeMemoryRegion(Vector* out, size_t* bit_offset, uint32_t bit_length) {
+  uint32_t byte_length = BitsToBytesRoundUp(bit_length);
+  EncodeVarintBits(out, bit_offset, byte_length);
+  *bit_offset = RoundUp(*bit_offset, kBitsPerByte);
+  out->resize(out->size() + byte_length);
+  MemoryRegion region(out->data() + *bit_offset / kBitsPerByte, byte_length);
+  *bit_offset += kBitsPerByte * byte_length;
+  return region;
+}
 
-  DCHECK_EQ(region.size(), needed_size_);
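+// Convenience alias: a BitTableBuilder whose row storage lives in the scoped
+// arena allocator.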
+template<uint32_t NumColumns>
+using ScopedBitTableBuilder = BitTableBuilder<NumColumns, ScopedArenaAllocatorAdapter<uint32_t>>;
 
-  // Note that the memory region does not have to be zeroed when we JIT code
-  // because we do not use the arena allocator there.
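+// Lays out the whole CodeInfo payload into out_ in a single pass: dex
+// register maps, the location catalog, the stack map / invoke info / inline
+// info bit tables, the register masks, and finally the stack masks. Returns
+// the size FillInCodeInfo will need, i.e. out_ plus its LEB128 length prefix.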
+size_t StackMapStream::PrepareForFillIn() {
+  size_t bit_offset = 0;
+  out_.clear();
 
-  // Write the CodeInfo header.
-  region.CopyFrom(0, MemoryRegion(code_info_encoding_.data(), code_info_encoding_.size()));
+  // Decide the offsets of dex register map entries, but do not write them out yet.
+  // Needs to be done first as it assigns the offsets that the stack map and
+  // inline info rows record below.
+  size_t dex_register_map_bytes = 0;
+  for (DexRegisterMapEntry& entry : dex_register_entries_) {
+    size_t size = entry.ComputeSize(location_catalog_entries_.size());
+    entry.offset = size == 0 ? DexRegisterMapEntry::kOffsetUnassigned : dex_register_map_bytes;
+    dex_register_map_bytes += size;
+  }
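+  // Entries that encode nothing keep kOffsetUnassigned and are skipped when
+  // the maps are written out below.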
 
-  CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  DCHECK_EQ(encoding.stack_map.num_entries, stack_maps_.size());
+  // Must be done before encoding the invoke info and inline info tables below,
+  // since their rows record the dex_method_index_idx values filled in here.
+  PrepareMethodIndices();
 
-  MemoryRegion dex_register_locations_region = region.Subregion(
-      encoding.dex_register_map.byte_offset,
-      encoding.dex_register_map.num_bytes);
+  // Dedup stack masks. Needs to be done first as it modifies the stack map entry.
+  size_t stack_mask_bits = stack_mask_max_ + 1;  // Need room for max element too.
+  size_t num_stack_masks = PrepareStackMasks(stack_mask_bits);
 
-  // Set the Dex register location catalog.
-  MemoryRegion dex_register_location_catalog_region = region.Subregion(
-      encoding.location_catalog.byte_offset,
-      encoding.location_catalog.num_bytes);
+  // Dedup register masks. Needs to be done first as it modifies the stack map entry.
+  size_t num_register_masks = PrepareRegisterMasks();
+
+  // Write dex register maps.
+  MemoryRegion dex_register_map_region =
+      EncodeMemoryRegion(&out_, &bit_offset, dex_register_map_bytes * kBitsPerByte);
+  for (DexRegisterMapEntry& entry : dex_register_entries_) {
+    size_t entry_size = entry.ComputeSize(location_catalog_entries_.size());
+    if (entry_size != 0) {
+      DexRegisterMap dex_register_map(
+          dex_register_map_region.Subregion(entry.offset, entry_size));
+      FillInDexRegisterMap(dex_register_map,
+                           entry.num_dex_registers,
+                           *entry.live_dex_registers_mask,
+                           entry.locations_start_index);
+    }
+  }
+
+  // Write dex register catalog.
+  EncodeVarintBits(&out_, &bit_offset, location_catalog_entries_.size());
+  size_t location_catalog_bytes = ComputeDexRegisterLocationCatalogSize();
+  MemoryRegion dex_register_location_catalog_region =
+      EncodeMemoryRegion(&out_, &bit_offset, location_catalog_bytes * kBitsPerByte);
   DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
   // Offset in `dex_register_location_catalog` where to store the next
   // register location.
@@ -353,93 +257,87 @@
   // Ensure we reached the end of the Dex registers location_catalog.
   DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
 
-  ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false, kArenaAllocStackMapStream);
-  uintptr_t next_dex_register_map_offset = 0;
-  uintptr_t next_inline_info_index = 0;
-  size_t invoke_info_idx = 0;
-  for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
-    StackMap stack_map = code_info.GetStackMapAt(i, encoding);
-    StackMapEntry entry = stack_maps_[i];
-
-    stack_map.SetDexPc(encoding.stack_map.encoding, entry.dex_pc);
-    stack_map.SetNativePcCodeOffset(encoding.stack_map.encoding, entry.native_pc_code_offset);
-    stack_map.SetRegisterMaskIndex(encoding.stack_map.encoding, entry.register_mask_index);
-    stack_map.SetStackMaskIndex(encoding.stack_map.encoding, entry.stack_mask_index);
-
-    size_t offset = MaybeCopyDexRegisterMap(dex_register_entries_[entry.dex_register_map_index],
-                                            &next_dex_register_map_offset,
-                                            dex_register_locations_region);
-    stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, offset);
-
+  // Write stack maps.
+  ScopedArenaAllocatorAdapter<void> adapter = allocator_->Adapter(kArenaAllocStackMapStream);
+  ScopedBitTableBuilder<StackMap::Field::kCount> stack_map_builder((adapter));
+  ScopedBitTableBuilder<InvokeInfo::Field::kCount> invoke_info_builder((adapter));
+  ScopedBitTableBuilder<InlineInfo::Field::kCount> inline_info_builder((adapter));
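+  // One stack map row per entry; invoke info rows only for entries that carry
+  // a dex method index, inline info rows only for inlined frames.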
+  for (const StackMapEntry& entry : stack_maps_) {
     if (entry.dex_method_index != dex::kDexNoIndex) {
-      InvokeInfo invoke_info(code_info.GetInvokeInfo(encoding, invoke_info_idx));
-      invoke_info.SetNativePcCodeOffset(encoding.invoke_info.encoding, entry.native_pc_code_offset);
-      invoke_info.SetInvokeType(encoding.invoke_info.encoding, entry.invoke_type);
-      invoke_info.SetMethodIndexIdx(encoding.invoke_info.encoding, entry.dex_method_index_idx);
-      ++invoke_info_idx;
+      invoke_info_builder.AddRow(
+          entry.native_pc_code_offset.CompressedValue(),
+          entry.invoke_type,
+          entry.dex_method_index_idx);
     }
 
     // Set the inlining info.
-    if (entry.inlining_depth != 0) {
-      InlineInfo inline_info = code_info.GetInlineInfo(next_inline_info_index, encoding);
-
-      // Fill in the index.
-      stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, next_inline_info_index);
-      DCHECK_EQ(next_inline_info_index, entry.inline_infos_start_index);
-      next_inline_info_index += entry.inlining_depth;
-
-      inline_info.SetDepth(encoding.inline_info.encoding, entry.inlining_depth);
-      DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
-
-      for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
-        InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
-        if (inline_entry.method != nullptr) {
-          inline_info.SetMethodIndexIdxAtDepth(
-              encoding.inline_info.encoding,
-              depth,
-              High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-          inline_info.SetExtraDataAtDepth(
-              encoding.inline_info.encoding,
-              depth,
-              Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-        } else {
-          inline_info.SetMethodIndexIdxAtDepth(encoding.inline_info.encoding,
-                                               depth,
-                                               inline_entry.dex_method_index_idx);
-          inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
-        }
-        inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
-        size_t dex_register_map_offset = MaybeCopyDexRegisterMap(
-            dex_register_entries_[inline_entry.dex_register_map_index],
-            &next_dex_register_map_offset,
-            dex_register_locations_region);
-        inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
-                                                   depth,
-                                                   dex_register_map_offset);
+    uint32_t inline_info_index = StackMap::kNoValue;
+    DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
+    for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
+      InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
+      uint32_t method_index_idx = inline_entry.dex_method_index_idx;
+      uint32_t extra_data = 1;
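+      // A known ArtMethod is stored split across two columns: high 32 bits in
+      // the method index column, low 32 bits in extra_data. An extra_data of 1
+      // marks a plain dex method index instead.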
+      if (inline_entry.method != nullptr) {
+        method_index_idx = High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method));
+        extra_data = Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method));
       }
-    } else if (encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, StackMap::kNoInlineInfo);
-    }
-  }
-
-  // Write stack masks table.
-  const size_t stack_mask_bits = encoding.stack_mask.encoding.BitSize();
-  if (stack_mask_bits > 0) {
-    size_t stack_mask_bytes = RoundUp(stack_mask_bits, kBitsPerByte) / kBitsPerByte;
-    for (size_t i = 0; i < encoding.stack_mask.num_entries; ++i) {
-      MemoryRegion source(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes);
-      BitMemoryRegion stack_mask = code_info.GetStackMask(i, encoding);
-      for (size_t bit_index = 0; bit_index < stack_mask_bits; ++bit_index) {
-        stack_mask.StoreBit(bit_index, source.LoadBit(bit_index));
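+      // kLast/kMore chains the frames of one inlining stack together; the
+      // stack map row records the index of the depth-0 frame.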
+      uint32_t index = inline_info_builder.AddRow(
+          (depth == entry.inlining_depth - 1) ? InlineInfo::kLast : InlineInfo::kMore,
+          method_index_idx,
+          inline_entry.dex_pc,
+          extra_data,
+          dex_register_entries_[inline_entry.dex_register_map_index].offset);
+      if (depth == 0) {
+        inline_info_index = index;
       }
     }
+    stack_map_builder.AddRow(
+        entry.native_pc_code_offset.CompressedValue(),
+        entry.dex_pc,
+        dex_register_entries_[entry.dex_register_map_index].offset,
+        inline_info_index,
+        entry.register_mask_index,
+        entry.stack_mask_index);
   }
+  stack_map_builder.Encode(&out_, &bit_offset);
+  invoke_info_builder.Encode(&out_, &bit_offset);
+  inline_info_builder.Encode(&out_, &bit_offset);
 
   // Write register masks table.
-  for (size_t i = 0; i < encoding.register_mask.num_entries; ++i) {
-    BitMemoryRegion register_mask = code_info.GetRegisterMask(i, encoding);
-    register_mask.StoreBits(0, register_masks_[i], encoding.register_mask.encoding.BitSize());
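+  // The masks were deduplicated in PrepareRegisterMasks(); each survivor
+  // becomes one row of a single-column bit table.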
+  ScopedBitTableBuilder<1> register_mask_builder((adapter));
+  for (size_t i = 0; i < num_register_masks; ++i) {
+    register_mask_builder.AddRow(register_masks_[i]);
   }
+  register_mask_builder.Encode(&out_, &bit_offset);
+
+  // Write stack masks table.
+  EncodeVarintBits(&out_, &bit_offset, stack_mask_bits);
+  out_.resize(BitsToBytesRoundUp(bit_offset + stack_mask_bits * num_stack_masks));
+  BitMemoryRegion stack_mask_region(MemoryRegion(out_.data(), out_.size()),
+                                    bit_offset,
+                                    stack_mask_bits * num_stack_masks);
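+  // Copy each deduplicated mask into the shared bit region, in chunks of up
+  // to 32 bits at a time.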
+  if (stack_mask_bits > 0) {
+    for (size_t i = 0; i < num_stack_masks; ++i) {
+      size_t stack_mask_bytes = BitsToBytesRoundUp(stack_mask_bits);
+      BitMemoryRegion src(MemoryRegion(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes));
+      BitMemoryRegion dst = stack_mask_region.Subregion(i * stack_mask_bits, stack_mask_bits);
+      for (size_t bit_index = 0; bit_index < stack_mask_bits; bit_index += BitSizeOf<uint32_t>()) {
+        size_t num_bits = std::min<size_t>(stack_mask_bits - bit_index, BitSizeOf<uint32_t>());
+        dst.StoreBits(bit_index, src.LoadBits(bit_index, num_bits), num_bits);
+      }
+    }
+  }
+
+  return UnsignedLeb128Size(out_.size()) + out_.size();
+}
+
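+// Copies the payload prepared above into `region`: a ULEB128 length prefix
+// followed by the raw bytes of out_.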
+void StackMapStream::FillInCodeInfo(MemoryRegion region) {
+  DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+  DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before FillIn";
+  DCHECK_EQ(region.size(), UnsignedLeb128Size(out_.size()) + out_.size());
+
+  uint8_t* ptr = EncodeUnsignedLeb128(region.begin(), out_.size());
+  region.CopyFromVector(ptr - region.begin(), out_);
 
   // Verify all written data in debug build.
   if (kIsDebugBuild) {
@@ -527,7 +425,6 @@
                                          size_t num_dex_registers,
                                          BitVector* live_dex_registers_mask,
                                          size_t dex_register_locations_index) const {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
   for (size_t reg = 0; reg < num_dex_registers; reg++) {
     // Find the location we tried to encode.
     DexRegisterLocation expected = DexRegisterLocation::None();
@@ -542,7 +439,7 @@
     } else {
       DCHECK(dex_register_map.IsDexRegisterLive(reg));
       DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation(
-          reg, num_dex_registers, code_info, encoding);
+          reg, num_dex_registers, code_info);
       DCHECK_EQ(expected.GetKind(), seen.GetKind());
       DCHECK_EQ(expected.GetValue(), seen.GetValue());
     }
@@ -600,8 +497,9 @@
   for (StackMapEntry& stack_map : stack_maps_) {
     size_t index = dedup.size();
     MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
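+    // Store the bits through a BitMemoryRegion, matching how the masks are
+    // read back (CodeInfo::GetStackMaskOf returns a BitMemoryRegion).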
+    BitMemoryRegion stack_mask_bits(stack_mask);
     for (size_t i = 0; i < entry_size_in_bits; i++) {
-      stack_mask.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i));
+      stack_mask_bits.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i));
     }
     stack_map.stack_mask_index = dedup.emplace(stack_mask, index).first->second;
   }
@@ -611,23 +509,23 @@
 // Check that all StackMapStream inputs are correctly encoded by trying to read them back.
 void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  DCHECK_EQ(code_info.GetNumberOfStackMaps(encoding), stack_maps_.size());
+  DCHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
+  DCHECK_EQ(code_info.GetNumberOfStackMaskBits(), static_cast<uint32_t>(stack_mask_max_ + 1));
+  DCHECK_EQ(code_info.GetNumberOfLocationCatalogEntries(), location_catalog_entries_.size());
   size_t invoke_info_index = 0;
   for (size_t s = 0; s < stack_maps_.size(); ++s) {
-    const StackMap stack_map = code_info.GetStackMapAt(s, encoding);
-    const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
+    const StackMap stack_map = code_info.GetStackMapAt(s);
     StackMapEntry entry = stack_maps_[s];
 
     // Check main stack map fields.
-    DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding, instruction_set_),
+    DCHECK_EQ(stack_map.GetNativePcOffset(instruction_set_),
               entry.native_pc_code_offset.Uint32Value(instruction_set_));
-    DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc);
-    DCHECK_EQ(stack_map.GetRegisterMaskIndex(stack_map_encoding), entry.register_mask_index);
-    DCHECK_EQ(code_info.GetRegisterMaskOf(encoding, stack_map), entry.register_mask);
-    const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits(encoding);
-    DCHECK_EQ(stack_map.GetStackMaskIndex(stack_map_encoding), entry.stack_mask_index);
-    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+    DCHECK_EQ(stack_map.GetDexPc(), entry.dex_pc);
+    DCHECK_EQ(stack_map.GetRegisterMaskIndex(), entry.register_mask_index);
+    DCHECK_EQ(code_info.GetRegisterMaskOf(stack_map), entry.register_mask);
+    const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits();
+    DCHECK_EQ(stack_map.GetStackMaskIndex(), entry.stack_mask_index);
+    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     if (entry.sp_mask != nullptr) {
       DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits());
       for (size_t b = 0; b < num_stack_mask_bits; b++) {
@@ -639,38 +537,36 @@
       }
     }
     if (entry.dex_method_index != dex::kDexNoIndex) {
-      InvokeInfo invoke_info = code_info.GetInvokeInfo(encoding, invoke_info_index);
-      DCHECK_EQ(invoke_info.GetNativePcOffset(encoding.invoke_info.encoding, instruction_set_),
+      InvokeInfo invoke_info = code_info.GetInvokeInfo(invoke_info_index);
+      DCHECK_EQ(invoke_info.GetNativePcOffset(instruction_set_),
                 entry.native_pc_code_offset.Uint32Value(instruction_set_));
-      DCHECK_EQ(invoke_info.GetInvokeType(encoding.invoke_info.encoding), entry.invoke_type);
-      DCHECK_EQ(invoke_info.GetMethodIndexIdx(encoding.invoke_info.encoding),
-                entry.dex_method_index_idx);
+      DCHECK_EQ(invoke_info.GetInvokeType(), entry.invoke_type);
+      DCHECK_EQ(invoke_info.GetMethodIndexIdx(), entry.dex_method_index_idx);
       invoke_info_index++;
     }
     CheckDexRegisterMap(code_info,
                         code_info.GetDexRegisterMapOf(
-                            stack_map, encoding, entry.dex_register_entry.num_dex_registers),
+                            stack_map, entry.dex_register_entry.num_dex_registers),
                         entry.dex_register_entry.num_dex_registers,
                         entry.dex_register_entry.live_dex_registers_mask,
                         entry.dex_register_entry.locations_start_index);
 
     // Check inline info.
-    DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
+    DCHECK_EQ(stack_map.HasInlineInfo(), (entry.inlining_depth != 0));
     if (entry.inlining_depth != 0) {
-      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-      DCHECK_EQ(inline_info.GetDepth(encoding.inline_info.encoding), entry.inlining_depth);
+      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+      DCHECK_EQ(inline_info.GetDepth(), entry.inlining_depth);
       for (size_t d = 0; d < entry.inlining_depth; ++d) {
         size_t inline_info_index = entry.inline_infos_start_index + d;
         DCHECK_LT(inline_info_index, inline_infos_.size());
         InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
-        DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, d),
-                  inline_entry.dex_pc);
-        if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, d)) {
-          DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d),
+        DCHECK_EQ(inline_info.GetDexPcAtDepth(d), inline_entry.dex_pc);
+        if (inline_info.EncodesArtMethodAtDepth(d)) {
+          DCHECK_EQ(inline_info.GetArtMethodAtDepth(d),
                     inline_entry.method);
         } else {
           const size_t method_index_idx =
-              inline_info.GetMethodIndexIdxAtDepth(encoding.inline_info.encoding, d);
+              inline_info.GetMethodIndexIdxAtDepth(d);
           DCHECK_EQ(method_index_idx, inline_entry.dex_method_index_idx);
           DCHECK_EQ(method_indices_[method_index_idx], inline_entry.method_index);
         }
@@ -679,7 +575,6 @@
                             code_info.GetDexRegisterMapAtDepth(
                                 d,
                                 inline_info,
-                                encoding,
                                 inline_entry.dex_register_entry.num_dex_registers),
                             inline_entry.dex_register_entry.num_dex_registers,
                             inline_entry.dex_register_entry.live_dex_registers_mask,
@@ -690,7 +585,7 @@
 }
 
 size_t StackMapStream::ComputeMethodInfoSize() const {
-  DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before " << __FUNCTION__;
+  DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before " << __FUNCTION__;
   return MethodInfo::ComputeSize(method_indices_.size());
 }
 
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 268e9bd..ea97cf6 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -73,36 +73,32 @@
         method_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
         stack_mask_max_(-1),
-        dex_pc_max_(kNoDexPc),
-        register_mask_max_(0),
-        number_of_stack_maps_with_inline_info_(0),
+        out_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(),
                                            allocator->Adapter(kArenaAllocStackMapStream)),
         current_entry_(),
         current_inline_info_(),
-        code_info_encoding_(allocator->Adapter(kArenaAllocStackMapStream)),
-        needed_size_(0),
         current_dex_register_(0),
         in_inline_frame_(false) {
     stack_maps_.reserve(10);
+    out_.reserve(64);
     location_catalog_entries_.reserve(4);
     dex_register_locations_.reserve(10 * 4);
     inline_infos_.reserve(2);
-    code_info_encoding_.reserve(16);
   }
 
   // A dex register map entry for a single stack map entry, contains what registers are live as
   // well as indices into the location catalog.
   class DexRegisterMapEntry {
    public:
-    static const size_t kOffsetUnassigned = -1;
+    static const uint32_t kOffsetUnassigned = -1;
 
     BitVector* live_dex_registers_mask;
     uint32_t num_dex_registers;
     size_t locations_start_index;
     // Computed fields
     size_t hash = 0;
-    size_t offset = kOffsetUnassigned;
+    uint32_t offset = kOffsetUnassigned;
 
     size_t ComputeSize(size_t catalog_size) const;
   };
@@ -113,7 +109,7 @@
     CodeOffset native_pc_code_offset;
     uint32_t register_mask;
     BitVector* sp_mask;
-    uint8_t inlining_depth;
+    uint32_t inlining_depth;
     size_t inline_infos_start_index;
     uint32_t stack_mask_index;
     uint32_t register_mask_index;
@@ -174,11 +170,6 @@
 
  private:
   size_t ComputeDexRegisterLocationCatalogSize() const;
-  size_t ComputeDexRegisterMapsSize() const;
-  void ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
-                                 size_t dex_register_maps_bytes);
-
-  CodeOffset ComputeMaxNativePcCodeOffset() const;
 
   // Returns the number of unique stack masks.
   size_t PrepareStackMasks(size_t entry_size_in_bits);
@@ -197,24 +188,11 @@
   bool DexRegisterMapEntryEquals(const DexRegisterMapEntry& a, const DexRegisterMapEntry& b) const;
 
   // Fill in the corresponding entries of a register map.
-  void ComputeInvokeInfoEncoding(CodeInfoEncoding* encoding);
-
-  // Returns the index of an entry with the same dex register map as the current_entry,
-  // or kNoSameDexMapFound if no such entry exists.
-  size_t FindEntryWithTheSameDexMap();
-  bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const;
-
-  // Fill in the corresponding entries of a register map.
   void FillInDexRegisterMap(DexRegisterMap dex_register_map,
                             uint32_t num_dex_registers,
                             const BitVector& live_dex_registers_mask,
                             uint32_t start_index_in_dex_register_locations) const;
 
-  // Returns the offset for the dex register inside of the dex register location region. See FillIn.
-  // Only copies the dex register map if the offset for the entry is not already assigned.
-  size_t MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
-                                 size_t* current_offset,
-                                 MemoryRegion dex_register_locations_region);
   void CheckDexRegisterMap(const CodeInfo& code_info,
                            const DexRegisterMap& dex_register_map,
                            size_t num_dex_registers,
@@ -244,21 +222,16 @@
   ScopedArenaVector<uint32_t> method_indices_;
   ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_;
   int stack_mask_max_;
-  uint32_t dex_pc_max_;
-  uint32_t register_mask_max_;
-  size_t number_of_stack_maps_with_inline_info_;
+
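+  // Encoded CodeInfo payload; filled by PrepareForFillIn() and copied into
+  // the final memory region by FillInCodeInfo().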
+  ScopedArenaVector<uint8_t> out_;
 
   ScopedArenaSafeMap<uint32_t, ScopedArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
 
   StackMapEntry current_entry_;
   InlineInfoEntry current_inline_info_;
-  ScopedArenaVector<uint8_t> code_info_encoding_;
-  size_t needed_size_;
   uint32_t current_dex_register_;
   bool in_inline_frame_;
 
-  static constexpr uint32_t kNoSameDexMapFound = -1;
-
   DISALLOW_COPY_AND_ASSIGN(StackMapStream);
 };
 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index e36c592..9db7588 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -29,14 +29,13 @@
 // to the given bit vector. Returns true if they are same.
 static bool CheckStackMask(
     const CodeInfo& code_info,
-    const CodeInfoEncoding& encoding,
     const StackMap& stack_map,
     const BitVector& bit_vector) {
-  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
-  if (bit_vector.GetNumberOfBits() > encoding.stack_mask.encoding.BitSize()) {
+  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
+  if (bit_vector.GetNumberOfBits() > code_info.GetNumberOfStackMaskBits()) {
     return false;
   }
-  for (size_t i = 0; i < encoding.stack_mask.encoding.BitSize(); ++i) {
+  for (size_t i = 0; i < code_info.GetNumberOfStackMaskBits(); ++i) {
     if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
       return false;
     }
@@ -65,30 +64,29 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(2u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - one 1-byte short Dex register location, and
   // - one 5-byte large Dex register location.
   size_t expected_location_catalog_size = 1u + 5u;
   ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
 
-  StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  StackMap stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask));
+  ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask));
 
-  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+  ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map =
-      code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -99,16 +97,16 @@
   ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
   ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
-                1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
-                1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
   ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(
-                0, number_of_dex_registers, code_info, encoding));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
+  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
 
   size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
       0, number_of_dex_registers, number_of_catalog_entries);
@@ -125,7 +123,7 @@
   ASSERT_EQ(0, location0.GetValue());
   ASSERT_EQ(-2, location1.GetValue());
 
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 TEST(StackMapTest, Test2) {
@@ -179,12 +177,11 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(4u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(7u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - six 1-byte short Dex register locations, and
   // - one 5-byte large Dex register location.
@@ -193,18 +190,18 @@
 
   // First stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-    ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(0);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+    ASSERT_EQ(0u, stack_map.GetDexPc());
+    ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -215,16 +212,16 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(
-                  0, number_of_dex_registers, code_info, encoding));
-    ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
+    ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -241,29 +238,29 @@
     ASSERT_EQ(0, location0.GetValue());
     ASSERT_EQ(-2, location1.GetValue());
 
-    ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
-    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_TRUE(stack_map.HasInlineInfo());
+    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+    ASSERT_EQ(2u, inline_info.GetDepth());
+    ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(0));
+    ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(1));
+    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(0));
+    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(1));
   }
 
   // Second stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(1, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
-    ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(1);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
+    ASSERT_EQ(1u, stack_map.GetDexPc());
+    ASSERT_EQ(128u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask2));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask2));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -274,17 +271,17 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(18, dex_register_map.GetMachineRegister(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(3, dex_register_map.GetMachineRegister(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -301,23 +298,23 @@
     ASSERT_EQ(18, location0.GetValue());
     ASSERT_EQ(3, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo());
   }
 
   // Third stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(2, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
-    ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(2);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u)));
+    ASSERT_EQ(2u, stack_map.GetDexPc());
+    ASSERT_EQ(192u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask3));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask3));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -328,17 +325,17 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(6, dex_register_map.GetMachineRegister(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(8, dex_register_map.GetMachineRegister(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -355,23 +352,23 @@
     ASSERT_EQ(6, location0.GetValue());
     ASSERT_EQ(8, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo());
   }
 
   // Fourth stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(3, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
-    ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(3);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u)));
+    ASSERT_EQ(3u, stack_map.GetDexPc());
+    ASSERT_EQ(256u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask4));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask4));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -382,17 +379,17 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(3, dex_register_map.GetMachineRegister(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(1, dex_register_map.GetMachineRegister(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -409,7 +406,7 @@
     ASSERT_EQ(3, location0.GetValue());
     ASSERT_EQ(1, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo());
   }
 }
 
@@ -440,12 +437,11 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(2u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - one 1-byte short Dex register location, and
   // - one 5-byte large Dex register location.
@@ -454,17 +450,17 @@
 
   // First stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-    ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(0);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+    ASSERT_EQ(0u, stack_map.GetDexPc());
+    ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
-    DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
+    DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers));
     ASSERT_TRUE(map.IsDexRegisterLive(0));
     ASSERT_TRUE(map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -474,15 +470,15 @@
     size_t expected_map_size = 1u + 1u;
     ASSERT_EQ(expected_map_size, map.Size());
 
-    ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstant,
-              map.GetLocationKind(1, number_of_dex_registers, code_info, encoding));
+              map.GetLocationKind(1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInStack,
-              map.GetLocationInternalKind(0, number_of_dex_registers, code_info, encoding));
+              map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstantLargeValue,
-              map.GetLocationInternalKind(1, number_of_dex_registers, code_info, encoding));
-    ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info, encoding));
-    ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+              map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+    ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
+    ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info));
 
     const size_t index0 =
         map.GetLocationCatalogEntryIndex(0, number_of_dex_registers, number_of_catalog_entries);
@@ -501,10 +497,10 @@
 
     // Test that the inline info dex register map deduplicated to the same offset as the stack map
     // one.
-    ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
-    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    EXPECT_EQ(inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, 0),
-              stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasInlineInfo());
+    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+    EXPECT_EQ(inline_info.GetDexRegisterMapOffsetAtDepth(0),
+              stack_map.GetDexRegisterMapOffset());
   }
 }
 
@@ -527,27 +523,26 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(1u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - one 5-byte large Dex register location.
   size_t expected_location_catalog_size = 5u;
   ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
 
-  StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  StackMap stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+  ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map =
-      code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
   ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
   ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -558,14 +553,14 @@
   ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
   ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
-                1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationInternalKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
-                1, number_of_dex_registers, code_info, encoding));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
+  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
 
   size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
       0, number_of_dex_registers, number_of_catalog_entries);
@@ -582,7 +577,7 @@
   ASSERT_EQ(0, location0.GetValue());
   ASSERT_EQ(-2, location1.GetValue());
 
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 // Generate a stack map whose dex register offset is
@@ -620,11 +615,10 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
   // The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
   // and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
   // has a size of 1 bit.
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(2u, number_of_catalog_entries);
   ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_catalog_entries));
 
@@ -635,21 +629,21 @@
   //   locations (that is, 127 bytes of data).
   // Hence it has a size of 255 bytes, and therefore...
   ASSERT_EQ(128u, DexRegisterMap::GetLiveBitMaskSize(number_of_dex_registers));
-  StackMap stack_map0 = code_info.GetStackMapAt(0, encoding);
+  StackMap stack_map0 = code_info.GetStackMapAt(0);
   DexRegisterMap dex_register_map0 =
-      code_info.GetDexRegisterMapOf(stack_map0, encoding, number_of_dex_registers);
+      code_info.GetDexRegisterMapOf(stack_map0, number_of_dex_registers);
   ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers,
                                                                number_of_catalog_entries));
   ASSERT_EQ(255u, dex_register_map0.Size());
 
-  StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
-  ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map.encoding));
+  StackMap stack_map1 = code_info.GetStackMapAt(1);
+  ASSERT_TRUE(stack_map1.HasDexRegisterMap());
   // ...the offset of the second Dex register map (relative to the
   // beginning of the Dex register maps region) is 255, the value that the
   // old encoding reserved as kNoDexRegisterMapSmallEncoding.
-  ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            StackMap::kNoDexRegisterMap);
-  ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding), 0xFFu);
+  ASSERT_NE(stack_map1.GetDexRegisterMapOffset(), StackMap::kNoValue);
+  ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(), 0xFFu);
 }
 
 TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -682,33 +676,32 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo ci(region);
-  CodeInfoEncoding encoding = ci.ExtractEncoding();
 
   // Verify first stack map.
-  StackMap sm0 = ci.GetStackMapAt(0, encoding);
-  DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, encoding, number_of_dex_registers);
-  ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
-  ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci, encoding));
+  StackMap sm0 = ci.GetStackMapAt(0);
+  DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers);
+  ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci));
+  ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci));
 
   // Verify second stack map.
-  StackMap sm1 = ci.GetStackMapAt(1, encoding);
-  DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, encoding, number_of_dex_registers);
-  ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
-  ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci, encoding));
+  StackMap sm1 = ci.GetStackMapAt(1);
+  DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers);
+  ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci));
+  ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci));
 
   // Verify third stack map.
-  StackMap sm2 = ci.GetStackMapAt(2, encoding);
-  DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, encoding, number_of_dex_registers);
-  ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
-  ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci, encoding));
+  StackMap sm2 = ci.GetStackMapAt(2);
+  DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers);
+  ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci));
+  ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci));
 
   // Verify dex register map offsets.
-  ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding));
-  ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
-  ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+  ASSERT_EQ(sm0.GetDexRegisterMapOffset(), sm1.GetDexRegisterMapOffset());
+  ASSERT_NE(sm0.GetDexRegisterMapOffset(), sm2.GetDexRegisterMapOffset());
+  ASSERT_NE(sm1.GetDexRegisterMapOffset(), sm2.GetDexRegisterMapOffset());
 }
 
 TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -732,33 +725,32 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(0u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   ASSERT_EQ(0u, location_catalog.Size());
 
-  StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  StackMap stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasDexRegisterMap());
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 
-  stack_map = code_info.GetStackMapAt(1, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
-  ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  stack_map = code_info.GetStackMapAt(1);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68)));
+  ASSERT_EQ(1u, stack_map.GetDexPc());
+  ASSERT_EQ(68u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasDexRegisterMap());
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 TEST(StackMapTest, InlineTest) {
@@ -835,100 +827,99 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo ci(region);
-  CodeInfoEncoding encoding = ci.ExtractEncoding();
 
   {
     // Verify first stack map.
-    StackMap sm0 = ci.GetStackMapAt(0, encoding);
+    StackMap sm0 = ci.GetStackMapAt(0);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, encoding, 2);
-    ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
-    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, 2);
+    ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
 
-    InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
-    ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+    InlineInfo if0 = ci.GetInlineInfoOf(sm0);
+    ASSERT_EQ(2u, if0.GetDepth());
+    ASSERT_EQ(2u, if0.GetDexPcAtDepth(0));
+    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(0));
+    ASSERT_EQ(3u, if0.GetDexPcAtDepth(1));
+    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(1));
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
-    ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
+    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, 1);
+    ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, encoding, 3);
-    ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci, encoding));
-    ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci, encoding));
-    ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
+    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, 3);
+    ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
+    ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci));
+    ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci));
   }
 
   {
     // Verify second stack map.
-    StackMap sm1 = ci.GetStackMapAt(1, encoding);
+    StackMap sm1 = ci.GetStackMapAt(1);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, encoding, 2);
-    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
-    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, 2);
+    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
 
-    InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
-    ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
+    InlineInfo if1 = ci.GetInlineInfoOf(sm1);
+    ASSERT_EQ(3u, if1.GetDepth());
+    ASSERT_EQ(2u, if1.GetDexPcAtDepth(0));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(0));
+    ASSERT_EQ(3u, if1.GetDexPcAtDepth(1));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(1));
+    ASSERT_EQ(5u, if1.GetDexPcAtDepth(2));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(2));
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
-    ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
+    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, 1);
+    ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, encoding, 3);
-    ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci, encoding));
-    ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci, encoding));
-    ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
+    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, 3);
+    ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
+    ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci));
+    ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci));
 
-    ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 2));
+    ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(2));
   }
 
   {
     // Verify third stack map.
-    StackMap sm2 = ci.GetStackMapAt(2, encoding);
+    StackMap sm2 = ci.GetStackMapAt(2);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, encoding, 2);
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, 2);
     ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
-    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
-    ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
+    ASSERT_FALSE(sm2.HasInlineInfo());
   }
 
   {
     // Verify fourth stack map.
-    StackMap sm3 = ci.GetStackMapAt(3, encoding);
+    StackMap sm3 = ci.GetStackMapAt(3);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, encoding, 2);
-    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
-    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, 2);
+    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
 
-    InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
-    ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
+    InlineInfo if2 = ci.GetInlineInfoOf(sm3);
+    ASSERT_EQ(3u, if2.GetDepth());
+    ASSERT_EQ(2u, if2.GetDexPcAtDepth(0));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(0));
+    ASSERT_EQ(5u, if2.GetDexPcAtDepth(1));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(1));
+    ASSERT_EQ(10u, if2.GetDexPcAtDepth(2));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(2));
 
-    ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(0));
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, encoding, 1);
-    ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci, encoding));
+    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, 1);
+    ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci));
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, encoding, 2);
+    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, 2);
     ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0));
-    ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci, encoding));
+    ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci));
   }
 }
 
 TEST(StackMapTest, CodeOffsetTest) {
-  // Test minimum alignments, encoding, and decoding.
+  // Test minimum alignments and decoding.
   CodeOffset offset_thumb2 =
       CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
   CodeOffset offset_arm64 =
@@ -969,13 +960,12 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
-  StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4, encoding);
-  StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8, encoding);
-  EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map.encoding),
-            stack_map2.GetStackMaskIndex(encoding.stack_map.encoding));
+  StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4);
+  StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8);
+  EXPECT_EQ(stack_map1.GetStackMaskIndex(), stack_map2.GetStackMaskIndex());
 }
 
 TEST(StackMapTest, TestInvokeInfo) {
@@ -1007,26 +997,25 @@
 
   CodeInfo code_info(code_info_region);
   MethodInfo method_info(method_info_region.begin());
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(3u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(3u, code_info.GetNumberOfStackMaps());
 
-  InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4, encoding));
-  InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8, encoding));
-  InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16, encoding));
-  InvokeInfo invoke_invalid(code_info.GetInvokeInfoForNativePcOffset(12, encoding));
+  InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4));
+  InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8));
+  InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16));
+  InvokeInfo invoke_invalid(code_info.GetInvokeInfoForNativePcOffset(12));
   EXPECT_FALSE(invoke_invalid.IsValid());  // No entry for that index.
   EXPECT_TRUE(invoke1.IsValid());
   EXPECT_TRUE(invoke2.IsValid());
   EXPECT_TRUE(invoke3.IsValid());
-  EXPECT_EQ(invoke1.GetInvokeType(encoding.invoke_info.encoding), kSuper);
-  EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding, method_info), 1u);
-  EXPECT_EQ(invoke1.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 4u);
-  EXPECT_EQ(invoke2.GetInvokeType(encoding.invoke_info.encoding), kStatic);
-  EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding, method_info), 3u);
-  EXPECT_EQ(invoke2.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 8u);
-  EXPECT_EQ(invoke3.GetInvokeType(encoding.invoke_info.encoding), kDirect);
-  EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding, method_info), 65535u);
-  EXPECT_EQ(invoke3.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 16u);
+  EXPECT_EQ(invoke1.GetInvokeType(), kSuper);
+  EXPECT_EQ(invoke1.GetMethodIndex(method_info), 1u);
+  EXPECT_EQ(invoke1.GetNativePcOffset(kRuntimeISA), 4u);
+  EXPECT_EQ(invoke2.GetInvokeType(), kStatic);
+  EXPECT_EQ(invoke2.GetMethodIndex(method_info), 3u);
+  EXPECT_EQ(invoke2.GetNativePcOffset(kRuntimeISA), 8u);
+  EXPECT_EQ(invoke3.GetInvokeType(), kDirect);
+  EXPECT_EQ(invoke3.GetMethodIndex(method_info), 65535u);
+  EXPECT_EQ(invoke3.GetNativePcOffset(kRuntimeISA), 16u);
 }
 
 }  // namespace art
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
index 73ef028..a782b16 100644
--- a/compiler/utils/x86/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -40,21 +40,6 @@
   kNoByteRegister = -1  // Signals an illegal register.
 };
 
-
-enum XmmRegister {
-  XMM0 = 0,
-  XMM1 = 1,
-  XMM2 = 2,
-  XMM3 = 3,
-  XMM4 = 4,
-  XMM5 = 5,
-  XMM6 = 6,
-  XMM7 = 7,
-  kNumberOfXmmRegisters = 8,
-  kNoXmmRegister = -1  // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
-
 enum X87Register {
   ST0 = 0,
   ST1 = 1,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 6b65aca..00c893a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -657,7 +657,7 @@
     // the runtime.
     LogCompletionTime();
 
-    if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+    if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
       // We want to just exit on non-debug builds, not bringing the runtime down
       // in an orderly fashion. So release the following fields.
       driver_.release();
@@ -3119,9 +3119,9 @@
 int main(int argc, char** argv) {
   int result = static_cast<int>(art::Dex2oat(argc, argv));
   // Everything was done, do an explicit exit here to avoid running Runtime destructors that take
-  // time (bug 10645725) unless we're a debug or instrumented build or running on valgrind. Note:
-  // The Dex2Oat class should not destruct the runtime in this case.
-  if (!art::kIsDebugBuild && !art::kIsPGOInstrumentation && (RUNNING_ON_MEMORY_TOOL == 0)) {
+  // time (bug 10645725) unless we're a debug or instrumented build or running on a memory tool.
+  // Note: The Dex2Oat class should not destruct the runtime in this case.
+  if (!art::kIsDebugBuild && !art::kIsPGOInstrumentation && !art::kRunningOnMemoryTool) {
     _exit(result);
   }
   return result;
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2fe16f7..1d0735d 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -472,8 +472,8 @@
 };
 
 TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
-  // Native memory usage isn't correctly tracked under sanitization.
-  TEST_DISABLED_FOR_MEMORY_TOOL_ASAN();
+  // Native memory usage isn't correctly tracked when running under ASan.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
 
   // The `native_alloc_2_ >= native_alloc_1_` assertion below may not
   // hold true on some x86 systems; disable this test while we
@@ -1054,8 +1054,6 @@
 }
 
 TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) {
-  TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND();  // b/63052624
-
   // The watchdog is independent of dex2oat and will not delete intermediates. It is possible
   // that the compilation succeeds and the file is completely written by the time the watchdog
   // kills dex2oat (but the dex2oat threads must have been scheduled pretty badly).
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 01726af..8ae93ff 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2083,7 +2083,8 @@
         size_t size = ArtMethod::Size(target_ptr_size_);
         size_t alignment = ArtMethod::Alignment(target_ptr_size_);
         memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0, size, alignment));
-        // Clear padding to avoid non-deterministic data in the image (and placate valgrind).
+        // Clear padding to avoid non-deterministic data in the image.
+        // Historical note: We also did that to placate Valgrind.
         reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(dest)->ClearPadding(size, alignment);
         break;
       }
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index c8a06ed..adf0ad6 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -147,8 +147,10 @@
         "arch/instruction_set_test.cc",
         "base/arena_allocator_test.cc",
         "base/bit_field_test.cc",
+        "base/bit_memory_region_test.cc",
         "base/bit_string_test.cc",
         "base/bit_struct_test.cc",
+        "base/bit_table_test.cc",
         "base/bit_utils_test.cc",
         "base/bit_vector_test.cc",
         "base/file_utils_test.cc",
diff --git a/libartbase/base/arena_allocator.h b/libartbase/base/arena_allocator.h
index 211ff4f..4ad77ba 100644
--- a/libartbase/base/arena_allocator.h
+++ b/libartbase/base/arena_allocator.h
@@ -147,34 +147,9 @@
 
 typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
 
-template <bool kAvailable, bool kValgrind>
-class ArenaAllocatorMemoryToolCheckImpl {
-  // This is the generic template but since there is a partial specialization
-  // for kValgrind == false, this can be instantiated only for kValgrind == true.
-  static_assert(kValgrind, "This template can be instantiated only for Valgrind.");
-  static_assert(kAvailable, "Valgrind implies memory tool availability.");
-
+class ArenaAllocatorMemoryTool {
  public:
-  ArenaAllocatorMemoryToolCheckImpl() : is_running_on_valgrind_(RUNNING_ON_MEMORY_TOOL) { }
-  bool IsRunningOnMemoryTool() { return is_running_on_valgrind_; }
-
- private:
-  const bool is_running_on_valgrind_;
-};
-
-template <bool kAvailable>
-class ArenaAllocatorMemoryToolCheckImpl<kAvailable, false> {
- public:
-  ArenaAllocatorMemoryToolCheckImpl() { }
-  bool IsRunningOnMemoryTool() { return kAvailable; }
-};
-
-typedef ArenaAllocatorMemoryToolCheckImpl<kMemoryToolIsAvailable, kMemoryToolIsValgrind>
-    ArenaAllocatorMemoryToolCheck;
-
-class ArenaAllocatorMemoryTool : private ArenaAllocatorMemoryToolCheck {
- public:
-  using ArenaAllocatorMemoryToolCheck::IsRunningOnMemoryTool;
+  bool IsRunningOnMemoryTool() { return kMemoryToolIsAvailable; }
 
   void MakeDefined(void* ptr, size_t size) {
     if (UNLIKELY(IsRunningOnMemoryTool())) {
diff --git a/libartbase/base/arena_allocator_test.cc b/libartbase/base/arena_allocator_test.cc
index e358710..6323a2b 100644
--- a/libartbase/base/arena_allocator_test.cc
+++ b/libartbase/base/arena_allocator_test.cc
@@ -16,6 +16,7 @@
 
 #include "arena_allocator-inl.h"
 #include "arena_bit_vector.h"
+#include "base/common_art_test.h"
 #include "gtest/gtest.h"
 #include "malloc_arena_pool.h"
 #include "memory_tool.h"
@@ -146,11 +147,8 @@
 }
 
 TEST_F(ArenaAllocatorTest, ReallocReuse) {
-  // Realloc does not reuse arenas when running under sanitization. So we cannot do those
-  if (RUNNING_ON_MEMORY_TOOL != 0) {
-    printf("WARNING: TEST DISABLED FOR MEMORY_TOOL\n");
-    return;
-  }
+  // Realloc does not reuse arenas when running under sanitization.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
 
   {
     // Case 1: small aligned allocation, aligned extend inside arena.
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index f3926bc..3f4d0ba 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -19,6 +19,9 @@
 
 #include "memory_region.h"
 
+#include "bit_utils.h"
+#include "memory_tool.h"
+
 namespace art {
 
 // Bit memory region is a bit offset subregion of a normal memory region. This is useful for
@@ -26,46 +29,126 @@
 class BitMemoryRegion FINAL : public ValueObject {
  public:
   BitMemoryRegion() = default;
-  ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_size) {
-    bit_start_ = bit_offset % kBitsPerByte;
-    const size_t start = bit_offset / kBitsPerByte;
-    const size_t end = (bit_offset + bit_size + kBitsPerByte - 1) / kBitsPerByte;
-    region_ = region.Subregion(start, end - start);
+  ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
+    : data_(reinterpret_cast<uintptr_t*>(AlignDown(region.pointer(), sizeof(uintptr_t)))),
+      bit_start_(8 * (reinterpret_cast<uintptr_t>(region.pointer()) % sizeof(uintptr_t))),
+      bit_size_(region.size_in_bits()) {
+  }
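+  // Note: data_ is aligned down to the word size and the byte misalignment is
+  // folded into bit_start_, so the word-based loads below always use aligned
+  // accesses (this is also why they may touch extra bytes around the region).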
+  ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_length)
+    : BitMemoryRegion(region) {
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    bit_start_ += bit_offset;
+    bit_size_ = bit_length;
   }
 
-  void* pointer() const { return region_.pointer(); }
-  size_t size() const { return region_.size(); }
-  size_t BitOffset() const { return bit_start_; }
+  ALWAYS_INLINE bool IsValid() const { return data_ != nullptr; }
+
   size_t size_in_bits() const {
-    return region_.size_in_bits();
+    return bit_size_;
   }
 
-  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_size) const {
-    return BitMemoryRegion(region_, bit_start_ + bit_offset, bit_size);
+  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_length) const {
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    BitMemoryRegion result = *this;
+    result.bit_start_ += bit_offset;
+    result.bit_size_ = bit_length;
+    return result;
   }
 
   // Load a single bit in the region. The bit at offset 0 is the least
   // significant bit in the first byte.
+  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
   ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
-    return region_.LoadBit(bit_offset + bit_start_);
+    DCHECK_LT(bit_offset, bit_size_);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
+    return ((data_[index] >> shift) & 1) != 0;
   }
 
   ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) const {
-    region_.StoreBit(bit_offset + bit_start_, value);
+    DCHECK_LT(bit_offset, bit_size_);
+    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+    data[index] &= ~(1 << shift);  // Clear bit.
+    data[index] |= (value ? 1 : 0) << shift;  // Set bit.
+    DCHECK_EQ(value, LoadBit(bit_offset));
   }
 
-  ALWAYS_INLINE uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
-    return region_.LoadBits(bit_offset + bit_start_, length);
+  // Load `bit_length` bits from `data` starting at given `bit_offset`.
+  // The least significant bit is stored in the smallest memory offset.
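+  // For example, with 64-bit words and bit_start_ == 0 (a sketch of the two
+  // cases below): LoadBits(5, 16) is served from data_[0] alone, while
+  // LoadBits(60, 8) combines the top 4 bits of data_[0] with the low 4 bits
+  // of data_[1].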
+  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
+  ALWAYS_INLINE uint32_t LoadBits(size_t bit_offset, size_t bit_length) const {
+    DCHECK(IsAligned<sizeof(uintptr_t)>(data_));
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
+    if (bit_length == 0) {
+      return 0;
+    }
+    uintptr_t mask = std::numeric_limits<uintptr_t>::max() >> (kBitsPerIntPtrT - bit_length);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
+    uintptr_t value = data_[index] >> shift;
+    size_t finished_bits = kBitsPerIntPtrT - shift;
+    if (finished_bits < bit_length) {
+      value |= data_[index + 1] << finished_bits;
+    }
+    return value & mask;
   }
 
-  // Store at a bit offset from inside the bit memory region.
-  ALWAYS_INLINE void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
-    region_.StoreBits(bit_offset + bit_start_, value, length);
+  // Load bits starting at given `bit_offset`, and advance the `bit_offset`.
+  ALWAYS_INLINE uint32_t LoadBitsAndAdvance(size_t* bit_offset, size_t bit_length) const {
+    uint32_t result = LoadBits(*bit_offset, bit_length);
+    *bit_offset += bit_length;
+    return result;
+  }
+
+  // Store `bit_length` bits in `data` starting at given `bit_offset`.
+  // The least significant bit is stored in the smallest memory offset.
+  ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
+    DCHECK_LE(value, MaxInt<uint32_t>(bit_length));
+    if (bit_length == 0) {
+      return;
+    }
+    // Write data byte by byte to avoid races with other threads
+    // on bytes that do not overlap with this region.
+    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+    uint32_t mask = std::numeric_limits<uint32_t>::max() >> (BitSizeOf<uint32_t>() - bit_length);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+    data[index] &= ~(mask << shift);  // Clear bits.
+    data[index] |= (value << shift);  // Set bits.
+    size_t finished_bits = kBitsPerByte - shift;
+    for (int i = 1; finished_bits < bit_length; i++, finished_bits += kBitsPerByte) {
+      data[index + i] &= ~(mask >> finished_bits);  // Clear bits.
+      data[index + i] |= (value >> finished_bits);  // Set bits.
+    }
+    DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
+  }
+
+  // Store bits starting at given `bit_offset`, and advance the `bit_offset`.
+  ALWAYS_INLINE void StoreBitsAndAdvance(size_t* bit_offset, uint32_t value, size_t bit_length) {
+    StoreBits(*bit_offset, value, bit_length);
+    *bit_offset += bit_length;
+  }
+
+  ALWAYS_INLINE bool Equals(const BitMemoryRegion& other) const {
+    return data_ == other.data_ &&
+           bit_start_ == other.bit_start_ &&
+           bit_size_ == other.bit_size_;
   }
 
  private:
-  MemoryRegion region_;
+  // The data pointer must be naturally aligned. This makes loading code faster.
+  uintptr_t* data_ = nullptr;
   size_t bit_start_ = 0;
+  size_t bit_size_ = 0;
 };
 
 }  // namespace art
diff --git a/libartbase/base/bit_memory_region_test.cc b/libartbase/base/bit_memory_region_test.cc
new file mode 100644
index 0000000..b754698
--- /dev/null
+++ b/libartbase/base/bit_memory_region_test.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_memory_region.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+static void CheckBits(uint8_t* data,
+                      size_t size,
+                      uint32_t init,
+                      size_t offset,
+                      size_t length,
+                      uint32_t value) {
+  for (size_t i = 0; i < size * kBitsPerByte; i++) {
+    uint8_t expected = (offset <= i && i < offset + length) ? value >> (i - offset) : init;
+    uint8_t actual = data[i / kBitsPerByte] >> (i % kBitsPerByte);
+    EXPECT_EQ(expected & 1, actual & 1);
+  }
+}
+
+TEST(BitMemoryRegion, TestBit) {
+  uint8_t data[sizeof(uint32_t) * 2];
+  for (size_t bit_offset = 0; bit_offset < 2 * sizeof(uint32_t) * kBitsPerByte; ++bit_offset) {
+    for (uint32_t initial_value = 0; initial_value <= 1; initial_value++) {
+      for (uint32_t value = 0; value <= 1; value++) {
+        // Check Store and Load with bit_offset set on the region.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr1(MemoryRegion(&data, sizeof(data)), bit_offset, 1);
+        bmr1.StoreBit(0, value);
+        EXPECT_EQ(bmr1.LoadBit(0), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, 1, value);
+        // Check Store and Load with bit_offset set on the methods.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr2(MemoryRegion(&data, sizeof(data)));
+        bmr2.StoreBit(bit_offset, value);
+        EXPECT_EQ(bmr2.LoadBit(bit_offset), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, 1, value);
+      }
+    }
+  }
+}
+
+TEST(BitMemoryRegion, TestBits) {
+  uint8_t data[sizeof(uint32_t) * 4];
+  for (size_t bit_offset = 0; bit_offset < 3 * sizeof(uint32_t) * kBitsPerByte; ++bit_offset) {
+    uint32_t mask = 0;
+    for (size_t bit_length = 0; bit_length < sizeof(uint32_t) * kBitsPerByte; ++bit_length) {
+      const uint32_t value = 0xDEADBEEF & mask;
+      for (uint32_t initial_value = 0; initial_value <= 1; initial_value++) {
+        // Check Store and Load with bit_offset set on the region.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr1(MemoryRegion(&data, sizeof(data)), bit_offset, bit_length);
+        bmr1.StoreBits(0, value, bit_length);
+        EXPECT_EQ(bmr1.LoadBits(0, bit_length), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, bit_length, value);
+        // Check Store and Load with bit_offset set on the methods.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr2(MemoryRegion(&data, sizeof(data)));
+        bmr2.StoreBits(bit_offset, value, bit_length);
+        EXPECT_EQ(bmr2.LoadBits(bit_offset, bit_length), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, bit_length, value);
+      }
+      mask = (mask << 1) | 1;
+    }
+  }
+}
+
+}  // namespace art
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
new file mode 100644
index 0000000..24bdd13
--- /dev/null
+++ b/libartbase/base/bit_table.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_BIT_TABLE_H_
+#define ART_LIBARTBASE_BASE_BIT_TABLE_H_
+
+#include <vector>
+
+#include "base/bit_memory_region.h"
+#include "base/bit_utils.h"
+#include "base/memory_region.h"
+
+namespace art {
+
+constexpr uint32_t kVarintHeaderBits = 4;
+constexpr uint32_t kVarintSmallValue = 11;  // Maximum value which is stored as-is.
+
+// Load variable-length bit-packed integer from `region` starting at `bit_offset`.
+// The first four bits determine the variable length of the encoded integer:
+//   Values 0..11 represent the result as-is, with no further following bits.
+//   Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
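+// For example (a sketch): 7 fits into the 4-bit header as-is; 42 is encoded
+// as header 12 followed by 8 bits holding 42 (12 bits in total); ~0u is
+// encoded as header 15 followed by 32 bits (36 bits in total).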
+ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryRegion region, size_t* bit_offset) {
+  uint32_t x = region.LoadBitsAndAdvance(bit_offset, kVarintHeaderBits);
+  if (x > kVarintSmallValue) {
+    x = region.LoadBitsAndAdvance(bit_offset, (x - kVarintSmallValue) * kBitsPerByte);
+  }
+  return x;
+}
+
+// Store a variable-length bit-packed integer into `out`, starting at `bit_offset`.
+template<typename Vector>
+ALWAYS_INLINE static inline void EncodeVarintBits(Vector* out, size_t* bit_offset, uint32_t value) {
+  if (value <= kVarintSmallValue) {
+    out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits));
+    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
+    region.StoreBitsAndAdvance(bit_offset, value, kVarintHeaderBits);
+  } else {
+    uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
+    out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits + num_bits));
+    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
+    uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
+    region.StoreBitsAndAdvance(bit_offset, header, kVarintHeaderBits);
+    region.StoreBitsAndAdvance(bit_offset, value, num_bits);
+  }
+}
+
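+// Bit-packed table of uint32_t values (a sketch of the serialized layout, as
+// written by BitTableBuilder::Encode and read by Decode below):
+//   varint: number of rows
+//   varint per column: bit width of the column (present only if rows != 0)
+//   data: rows * NumRowBits() bits, rows stored back to back without padding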
+template<uint32_t kNumColumns>
+class BitTable {
+ public:
+  class Accessor {
+   public:
+    static constexpr uint32_t kNoValue = std::numeric_limits<uint32_t>::max();
+
+    Accessor(const BitTable* table, uint32_t row) : table_(table), row_(row) {}
+
+    ALWAYS_INLINE uint32_t Row() const { return row_; }
+
+    ALWAYS_INLINE bool IsValid() const { return table_ != nullptr && row_ < table_->NumRows(); }
+
+    template<uint32_t Column>
+    ALWAYS_INLINE uint32_t Get() const {
+      static_assert(Column < kNumColumns, "Column out of bounds");
+      return table_->Get(row_, Column);
+    }
+
+    ALWAYS_INLINE bool Equals(const Accessor& other) {
+      return this->table_ == other.table_ && this->row_ == other.row_;
+    }
+
+    Accessor& operator++() {
+      row_++;
+      return *this;
+    }
+
+   protected:
+    const BitTable* table_;
+    uint32_t row_;
+  };
+
+  static constexpr uint32_t kValueBias = -1;
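+  // All stored values are biased by +1 (see Get() and BitTableBuilder::Encode),
+  // so Accessor::kNoValue (0xFFFFFFFF) is encoded as 0 and a column which
+  // contains only kNoValue occupies zero bits per row.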
+
+  BitTable() {}
+  BitTable(void* data, size_t size, size_t* bit_offset = nullptr) {
+    Decode(BitMemoryRegion(MemoryRegion(data, size)), bit_offset);
+  }
+
+  ALWAYS_INLINE void Decode(BitMemoryRegion region, size_t* bit_offset) {
+    // Decode row count and column sizes from the table header.
+    num_rows_ = DecodeVarintBits(region, bit_offset);
+    if (num_rows_ != 0) {
+      column_offset_[0] = 0;
+      for (uint32_t i = 0; i < kNumColumns; i++) {
+        size_t column_end = column_offset_[i] + DecodeVarintBits(region, bit_offset);
+        column_offset_[i + 1] = column_end;
+        DCHECK_EQ(column_offset_[i + 1], column_end) << "Overflow";
+      }
+    }
+
+    // Record the region which contains the table data and skip past it.
+    table_data_ = region.Subregion(*bit_offset, num_rows_ * NumRowBits());
+    *bit_offset += table_data_.size_in_bits();
+  }
+
+  ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
+    DCHECK_LT(row, num_rows_);
+    DCHECK_LT(column, kNumColumns);
+    size_t offset = row * NumRowBits() + column_offset_[column];
+    return table_data_.LoadBits(offset, NumColumnBits(column)) + kValueBias;
+  }
+
+  size_t NumRows() const { return num_rows_; }
+
+  uint32_t NumRowBits() const { return column_offset_[kNumColumns]; }
+
+  constexpr size_t NumColumns() const { return kNumColumns; }
+
+  uint32_t NumColumnBits(uint32_t column) const {
+    return column_offset_[column + 1] - column_offset_[column];
+  }
+
+  size_t DataBitSize() const { return num_rows_ * column_offset_[kNumColumns]; }
+
+ protected:
+  BitMemoryRegion table_data_;
+  size_t num_rows_ = 0;
+
+  uint16_t column_offset_[kNumColumns + 1] = {};
+};
+
+template<uint32_t kNumColumns>
+constexpr uint32_t BitTable<kNumColumns>::Accessor::kNoValue;
+
+template<uint32_t kNumColumns>
+constexpr uint32_t BitTable<kNumColumns>::kValueBias;
+
+template<uint32_t kNumColumns, typename Alloc = std::allocator<uint32_t>>
+class BitTableBuilder {
+ public:
+  explicit BitTableBuilder(Alloc alloc = Alloc()) : buffer_(alloc) {}
+
+  template<typename ... T>
+  uint32_t AddRow(T ... values) {
+    constexpr size_t count = sizeof...(values);
+    static_assert(count == kNumColumns, "Incorrect argument count");
+    uint32_t data[count] = { values... };
+    buffer_.insert(buffer_.end(), data, data + count);
+    return num_rows_++;
+  }
+
+  ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column) const {
+    return buffer_[row * kNumColumns + column];
+  }
+
+  template<typename Vector>
+  void Encode(Vector* out, size_t* bit_offset) {
+    constexpr uint32_t bias = BitTable<kNumColumns>::kValueBias;
+    size_t initial_bit_offset = *bit_offset;
+    // Measure data size.
+    uint32_t max_column_value[kNumColumns] = {};
+    for (uint32_t r = 0; r < num_rows_; r++) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        max_column_value[c] |= Get(r, c) - bias;
+      }
+    }
+    // Write table header.
+    uint32_t table_data_bits = 0;
+    uint32_t column_bits[kNumColumns] = {};
+    EncodeVarintBits(out, bit_offset, num_rows_);
+    if (num_rows_ != 0) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        column_bits[c] = MinimumBitsToStore(max_column_value[c]);
+        EncodeVarintBits(out, bit_offset, column_bits[c]);
+        table_data_bits += num_rows_ * column_bits[c];
+      }
+    }
+    // Write table data.
+    out->resize(BitsToBytesRoundUp(*bit_offset + table_data_bits));
+    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
+    for (uint32_t r = 0; r < num_rows_; r++) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        region.StoreBitsAndAdvance(bit_offset, Get(r, c) - bias, column_bits[c]);
+      }
+    }
+    // Verify the written data.
+    if (kIsDebugBuild) {
+      BitTable<kNumColumns> table;
+      table.Decode(region, &initial_bit_offset);
+      DCHECK_EQ(this->num_rows_, table.NumRows());
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        DCHECK_EQ(column_bits[c], table.NumColumnBits(c));
+      }
+      for (uint32_t r = 0; r < num_rows_; r++) {
+        for (uint32_t c = 0; c < kNumColumns; c++) {
+          DCHECK_EQ(this->Get(r, c), table.Get(r, c)) << " (" << r << ", " << c << ")";
+        }
+      }
+    }
+  }
+
+ protected:
+  std::vector<uint32_t, Alloc> buffer_;
+  uint32_t num_rows_ = 0;
+};
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_BIT_TABLE_H_
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
new file mode 100644
index 0000000..25bfcf0
--- /dev/null
+++ b/libartbase/base/bit_table_test.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_table.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(BitTableTest, TestVarint) {
+  for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
+    uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, ~1u, ~0u };
+    for (uint32_t value : values) {
+      std::vector<uint8_t> buffer;
+      size_t encode_bit_offset = start_bit_offset;
+      EncodeVarintBits(&buffer, &encode_bit_offset, value);
+
+      size_t decode_bit_offset = start_bit_offset;
+      BitMemoryRegion region(MemoryRegion(buffer.data(), buffer.size()));
+      uint32_t result = DecodeVarintBits(region, &decode_bit_offset);
+      EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+      EXPECT_EQ(value, result);
+    }
+  }
+}
+
+TEST(BitTableTest, TestEmptyTable) {
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<1> builder;
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  EXPECT_EQ(0u, table.NumRows());
+}
+
+TEST(BitTableTest, TestSingleColumnTable) {
+  constexpr uint32_t kNoValue = -1;
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<1> builder;
+  builder.AddRow(42u);
+  builder.AddRow(kNoValue);
+  builder.AddRow(1000u);
+  builder.AddRow(kNoValue);
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  EXPECT_EQ(4u, table.NumRows());
+  EXPECT_EQ(42u, table.Get(0));
+  EXPECT_EQ(kNoValue, table.Get(1));
+  EXPECT_EQ(1000u, table.Get(2));
+  EXPECT_EQ(kNoValue, table.Get(3));
+  EXPECT_EQ(10u, table.NumColumnBits(0));
+}
+
+TEST(BitTableTest, TestUnalignedTable) {
+  for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
+    std::vector<uint8_t> buffer;
+    size_t encode_bit_offset = start_bit_offset;
+    BitTableBuilder<1> builder;
+    builder.AddRow(42u);
+    builder.Encode(&buffer, &encode_bit_offset);
+
+    size_t decode_bit_offset = start_bit_offset;
+    BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
+    EXPECT_EQ(encode_bit_offset, decode_bit_offset) << " start_bit_offset=" << start_bit_offset;
+    EXPECT_EQ(1u, table.NumRows());
+    EXPECT_EQ(42u, table.Get(0));
+  }
+}
+
+TEST(BitTableTest, TestBigTable) {
+  constexpr uint32_t kNoValue = -1;
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<4> builder;
+  builder.AddRow(42u, kNoValue, 0u, static_cast<uint32_t>(-2));
+  builder.AddRow(62u, kNoValue, 63u, static_cast<uint32_t>(-3));
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<4> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  EXPECT_EQ(2u, table.NumRows());
+  EXPECT_EQ(42u, table.Get(0, 0));
+  EXPECT_EQ(kNoValue, table.Get(0, 1));
+  EXPECT_EQ(0u, table.Get(0, 2));
+  EXPECT_EQ(static_cast<uint32_t>(-2), table.Get(0, 3));
+  EXPECT_EQ(62u, table.Get(1, 0));
+  EXPECT_EQ(kNoValue, table.Get(1, 1));
+  EXPECT_EQ(63u, table.Get(1, 2));
+  EXPECT_EQ(static_cast<uint32_t>(-3), table.Get(1, 3));
+  EXPECT_EQ(6u, table.NumColumnBits(0));
+  EXPECT_EQ(0u, table.NumColumnBits(1));
+  EXPECT_EQ(7u, table.NumColumnBits(2));
+  EXPECT_EQ(32u, table.NumColumnBits(3));
+}
+
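+// A minimal sketch showing how the row Accessor can be used to read the same
+// data back through a typed per-row view instead of table.Get(row, column).
+TEST(BitTableTest, TestAccessorSketch) {
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<2> builder;
+  builder.AddRow(42u, 1000u);
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<2> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  BitTable<2>::Accessor row(&table, /* row */ 0u);
+  EXPECT_TRUE(row.IsValid());
+  EXPECT_EQ(42u, row.Get<0>());
+  EXPECT_EQ(1000u, row.Get<1>());
+}
+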
+}  // namespace art
diff --git a/libartbase/base/bit_utils.h b/libartbase/base/bit_utils.h
index 04f0e85..58cc78c 100644
--- a/libartbase/base/bit_utils.h
+++ b/libartbase/base/bit_utils.h
@@ -22,6 +22,7 @@
 
 #include <android-base/logging.h>
 
+#include "globals.h"
 #include "stl_util_identity.h"
 
 namespace art {
@@ -499,6 +500,10 @@
   return bitfield_unsigned;
 }
 
+inline static constexpr size_t BitsToBytesRoundUp(size_t num_bits) {
+  return RoundUp(num_bits, kBitsPerByte) / kBitsPerByte;
+}
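+// For example, BitsToBytesRoundUp(0) == 0 and BitsToBytesRoundUp(9) == 2.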
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_BIT_UTILS_H_
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index a4764c2..fe988a4 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -207,23 +207,11 @@
   }
 
 #define TEST_DISABLED_FOR_MEMORY_TOOL() \
-  if (RUNNING_ON_MEMORY_TOOL > 0) { \
+  if (kRunningOnMemoryTool) { \
     printf("WARNING: TEST DISABLED FOR MEMORY TOOL\n"); \
     return; \
   }
 
-#define TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND() \
-  if (RUNNING_ON_MEMORY_TOOL > 0 && kMemoryToolIsValgrind) { \
-    printf("WARNING: TEST DISABLED FOR MEMORY TOOL VALGRIND\n"); \
-    return; \
-  }
-
-#define TEST_DISABLED_FOR_MEMORY_TOOL_ASAN() \
-  if (RUNNING_ON_MEMORY_TOOL > 0 && !kMemoryToolIsValgrind) { \
-    printf("WARNING: TEST DISABLED FOR MEMORY TOOL ASAN\n"); \
-    return; \
-  }
-
 #define TEST_DISABLED_FOR_HEAP_POISONING() \
   if (kPoisonHeapReferences) { \
     printf("WARNING: TEST DISABLED FOR HEAP POISONING\n"); \
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index 9450e1e..56934ac 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -264,7 +264,8 @@
 
 bool LocationIsOnSystem(const char* path) {
   UniqueCPtr<const char[]> full_path(realpath(path, nullptr));
-  return path != nullptr && android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
+  return full_path != nullptr &&
+      android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
 }
 
 bool LocationIsOnSystemFramework(const char* full_path) {
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 69d1a64..39e0c50 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -38,6 +38,9 @@
 // compile-time constant so the compiler can generate better code.
 static constexpr int kPageSize = 4096;
 
+// Size of Dex virtual registers.
+static constexpr size_t kVRegSize = 4;
+
 // Returns whether the given memory offset can be used for generating
 // an implicit null check.
 static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 144b06c..15a5d71 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -53,7 +53,7 @@
     memory_ = unaligned_memory_;
   } else {
     memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
-    if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+    if (kRunningOnMemoryTool) {
       size_t head = memory_ - unaligned_memory_;
       size_t tail = overallocation - head;
       MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
@@ -66,7 +66,7 @@
 
 MallocArena::~MallocArena() {
   constexpr size_t overallocation = RequiredOverallocation();
-  if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+  if (overallocation != 0u && kRunningOnMemoryTool) {
     size_t head = memory_ - unaligned_memory_;
     size_t tail = overallocation - head;
     MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
@@ -132,7 +132,7 @@
 }
 
 void MallocArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+  if (kRunningOnMemoryTool) {
     for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
       MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index c455fed..9ba1d6c 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -460,7 +460,7 @@
       (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
   size_t redzone_size = 0;
-  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
+  if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
     redzone_size = kPageSize;
     page_aligned_byte_count += redzone_size;
   }
@@ -649,9 +649,11 @@
 bool MemMap::Sync() {
   bool result;
   if (redzone_size_ != 0) {
-    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
-    // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
-    // noaccess protection from the msync range. b/27552451.
+    // To avoid errors when running on a memory tool, temporarily lift the lower-end noaccess
+    // protection before passing it to msync() as it only accepts page-aligned base address,
+    // and exclude the higher-end noaccess protection from the msync range. b/27552451.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether this special case is needed for ASan.
     uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
     MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
     result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
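The constraint behind this block: msync(2) only accepts a page-aligned base address, so when the low redzone is protected the code temporarily marks [base_begin, begin) defined and syncs from the aligned base. A hedged sketch of just that call shape:

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstddef>

    // Sync a mapping whose first page is a poisoned redzone. The MAKE_DEFINED
    // call above only updates tool metadata; the kernel sees the same pages.
    bool SyncFromAlignedBase(uint8_t* base_begin, uint8_t* end) {
      return msync(base_begin, static_cast<size_t>(end - base_begin), MS_SYNC) == 0;
    }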
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index d956126..4a78bdc 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -471,31 +471,33 @@
   // cannot allocate in the 2GB-4GB region.
   TEST_DISABLED_FOR_MIPS();
 
+  // This test may not work under Valgrind.
+  // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+  // check whether this test works with ASan.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
+
   CommonInit();
-  // This test may not work under valgrind.
-  if (RUNNING_ON_MEMORY_TOOL == 0) {
-    constexpr size_t size = 0x100000;
-    // Try all addresses starting from 2GB to 4GB.
-    size_t start_addr = 2 * GB;
-    std::string error_msg;
-    std::unique_ptr<MemMap> map;
-    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
-      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
-                                     reinterpret_cast<uint8_t*>(start_addr),
-                                     size,
-                                     PROT_READ | PROT_WRITE,
-                                     /*low_4gb*/true,
-                                     false,
-                                     &error_msg));
-      if (map != nullptr) {
-        break;
-      }
+  constexpr size_t size = 0x100000;
+  // Try all addresses starting from 2GB to 4GB.
+  size_t start_addr = 2 * GB;
+  std::string error_msg;
+  std::unique_ptr<MemMap> map;
+  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
+    map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+                                   reinterpret_cast<uint8_t*>(start_addr),
+                                   size,
+                                   PROT_READ | PROT_WRITE,
+                                   /*low_4gb*/true,
+                                   false,
+                                   &error_msg));
+    if (map != nullptr) {
+      break;
     }
-    ASSERT_TRUE(map.get() != nullptr) << error_msg;
-    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
-    ASSERT_TRUE(error_msg.empty());
-    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
   }
+  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
 }
 
 TEST_F(MemMapTest, MapAnonymousOverflow) {
diff --git a/libartbase/base/memory_region.cc b/libartbase/base/memory_region.cc
index 862ff73..d207872 100644
--- a/libartbase/base/memory_region.cc
+++ b/libartbase/base/memory_region.cc
@@ -29,36 +29,4 @@
   memmove(reinterpret_cast<void*>(begin() + offset), from.pointer(), from.size());
 }
 
-void MemoryRegion::StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
-  DCHECK_LE(value, MaxInt<uint32_t>(length));
-  DCHECK_LE(length, BitSizeOf<uint32_t>());
-  DCHECK_LE(bit_offset + length, size_in_bits());
-  if (length == 0) {
-    return;
-  }
-  // Bits are stored in this order {7 6 5 4 3 2 1 0}.
-  // How many remaining bits in current byte is (bit_offset % kBitsPerByte) + 1.
-  uint8_t* out = ComputeInternalPointer<uint8_t>(bit_offset >> kBitsPerByteLog2);
-  size_t orig_len = length;
-  uint32_t orig_value = value;
-  uintptr_t bit_remainder = bit_offset % kBitsPerByte;
-  while (true) {
-    const uintptr_t remaining_bits = kBitsPerByte - bit_remainder;
-    if (length <= remaining_bits) {
-      // Length is smaller than all of remainder bits.
-      size_t mask = ((1 << length) - 1) << bit_remainder;
-      *out = (*out & ~mask) | (value << bit_remainder);
-      break;
-    }
-    // Copy remaining bits in current byte.
-    size_t value_mask = (1 << remaining_bits) - 1;
-    *out = (*out & ~(value_mask << bit_remainder)) | ((value & value_mask) << bit_remainder);
-    value >>= remaining_bits;
-    bit_remainder = 0;
-    length -= remaining_bits;
-    ++out;
-  }
-  DCHECK_EQ(LoadBits(bit_offset, orig_len), orig_value) << bit_offset << " " << orig_len;
-}
-
 }  // namespace art
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 3d00f5b..2060329 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -109,67 +109,6 @@
     return ComputeInternalPointer<T>(offset);
   }
 
-  // Load a single bit in the region. The bit at offset 0 is the least
-  // significant bit in the first byte.
-  ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
-    uint8_t bit_mask;
-    uint8_t byte = *ComputeBitPointer(bit_offset, &bit_mask);
-    return byte & bit_mask;
-  }
-
-  ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) const {
-    uint8_t bit_mask;
-    uint8_t* byte = ComputeBitPointer(bit_offset, &bit_mask);
-    if (value) {
-      *byte |= bit_mask;
-    } else {
-      *byte &= ~bit_mask;
-    }
-  }
-
-  // Load `length` bits from the region starting at bit offset `bit_offset`.
-  // The bit at the smallest offset is the least significant bit in the
-  // loaded value.  `length` must not be larger than the number of bits
-  // contained in the return value (32).
-  ALWAYS_INLINE uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
-    DCHECK_LE(length, BitSizeOf<uint32_t>());
-    DCHECK_LE(bit_offset + length, size_in_bits());
-    if (UNLIKELY(length == 0)) {
-      // Do not touch any memory if the range is empty.
-      return 0;
-    }
-    const uint8_t* address = begin() + bit_offset / kBitsPerByte;
-    const uint32_t shift = bit_offset & (kBitsPerByte - 1);
-    // Load the value (reading only the strictly needed bytes).
-    const uint32_t load_bit_count = shift + length;
-    uint32_t value = address[0] >> shift;
-    if (load_bit_count > 8) {
-      value |= static_cast<uint32_t>(address[1]) << (8 - shift);
-      if (load_bit_count > 16) {
-        value |= static_cast<uint32_t>(address[2]) << (16 - shift);
-        if (load_bit_count > 24) {
-          value |= static_cast<uint32_t>(address[3]) << (24 - shift);
-          if (load_bit_count > 32) {
-            value |= static_cast<uint32_t>(address[4]) << (32 - shift);
-          }
-        }
-      }
-    }
-    // Clear unwanted most significant bits.
-    uint32_t clear_bit_count = BitSizeOf(value) - length;
-    value = (value << clear_bit_count) >> clear_bit_count;
-    for (size_t i = 0; i < length; ++i) {
-      DCHECK_EQ((value >> i) & 1, LoadBit(bit_offset + i));
-    }
-    return value;
-  }
-
-  // Store `value` on `length` bits in the region starting at bit offset
-  // `bit_offset`.  The bit at the smallest offset is the least significant
-  // bit of the stored `value`.  `value` must not be larger than `length`
-  // bits.
-  void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length);
-
   void CopyFrom(size_t offset, const MemoryRegion& from) const;
 
   template<class Vector>
diff --git a/libartbase/base/memory_region_test.cc b/libartbase/base/memory_region_test.cc
index e3aead4..72e03a4 100644
--- a/libartbase/base/memory_region_test.cc
+++ b/libartbase/base/memory_region_test.cc
@@ -18,8 +18,6 @@
 
 #include "gtest/gtest.h"
 
-#include "bit_memory_region.h"
-
 namespace art {
 
 TEST(MemoryRegion, LoadUnaligned) {
@@ -57,35 +55,4 @@
   }
 }
 
-TEST(MemoryRegion, TestBits) {
-  const size_t n = 8;
-  uint8_t data[n] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-  MemoryRegion region(&data, n);
-  uint32_t value = 0xDEADBEEF;
-  // Try various offsets and lengths.
-  for (size_t bit_offset = 0; bit_offset < 2 * kBitsPerByte; ++bit_offset) {
-    for (size_t length = 0; length < 2 * kBitsPerByte; ++length) {
-      const uint32_t length_mask = (1 << length) - 1;
-      uint32_t masked_value = value & length_mask;
-      BitMemoryRegion bmr(region, bit_offset, length);
-      region.StoreBits(bit_offset, masked_value, length);
-      EXPECT_EQ(region.LoadBits(bit_offset, length), masked_value);
-      EXPECT_EQ(bmr.LoadBits(0, length), masked_value);
-      // Check adjacent bits to make sure they were not incorrectly cleared.
-      EXPECT_EQ(region.LoadBits(0, bit_offset), (1u << bit_offset) - 1);
-      EXPECT_EQ(region.LoadBits(bit_offset + length, length), length_mask);
-      region.StoreBits(bit_offset, length_mask, length);
-      // Store with bit memory region.
-      bmr.StoreBits(0, masked_value, length);
-      EXPECT_EQ(bmr.LoadBits(0, length), masked_value);
-      // Check adjacent bits to make sure they were not incorrectly cleared.
-      EXPECT_EQ(region.LoadBits(0, bit_offset), (1u << bit_offset) - 1);
-      EXPECT_EQ(region.LoadBits(bit_offset + length, length), length_mask);
-      region.StoreBits(bit_offset, length_mask, length);
-      // Flip the value to try different edge bit combinations.
-      value = ~value;
-    }
-  }
-}
-
 }  // namespace art
diff --git a/libartbase/base/memory_tool.h b/libartbase/base/memory_tool.h
index e1df99f..d381f01 100644
--- a/libartbase/base/memory_tool.h
+++ b/libartbase/base/memory_tool.h
@@ -19,53 +19,53 @@
 
 #include <stddef.h>
 
+namespace art {
+
 #if !defined(__has_feature)
-#define __has_feature(x) 0
+# define __has_feature(x) 0
 #endif
 
 #if __has_feature(address_sanitizer)
 
-#include <sanitizer/asan_interface.h>
-#define ADDRESS_SANITIZER
+# include <sanitizer/asan_interface.h>
+# define ADDRESS_SANITIZER
 
-#ifdef ART_ENABLE_ADDRESS_SANITIZER
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
+# ifdef ART_ENABLE_ADDRESS_SANITIZER
+#  define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
+#  define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
+#  define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
 constexpr bool kMemoryToolIsAvailable = true;
-#else
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# else
+#  define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+#  define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+#  define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
 constexpr bool kMemoryToolIsAvailable = false;
-#endif
+# endif
 
 extern "C" void __asan_handle_no_return();
 
-#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
-#define RUNNING_ON_MEMORY_TOOL 1U
-constexpr bool kMemoryToolIsValgrind = false;
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+# define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
+constexpr bool kRunningOnMemoryTool = true;
 constexpr bool kMemoryToolDetectsLeaks = true;
 constexpr bool kMemoryToolAddsRedzones = true;
 constexpr size_t kMemoryToolStackGuardSizeScale = 2;
 
 #else
 
-#include <memcheck/memcheck.h>
-#include <valgrind.h>
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
-#define ATTRIBUTE_NO_SANITIZE_ADDRESS
-#define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
-#define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
-constexpr bool kMemoryToolIsAvailable = true;
-constexpr bool kMemoryToolIsValgrind = true;
-constexpr bool kMemoryToolDetectsLeaks = true;
-constexpr bool kMemoryToolAddsRedzones = true;
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS
+# define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
+constexpr bool kRunningOnMemoryTool = false;
+constexpr bool kMemoryToolIsAvailable = false;
+constexpr bool kMemoryToolDetectsLeaks = false;
+constexpr bool kMemoryToolAddsRedzones = false;
 constexpr size_t kMemoryToolStackGuardSizeScale = 1;
 
 #endif
 
+}  // namespace art
+
 #endif  // ART_LIBARTBASE_BASE_MEMORY_TOOL_H_
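With the RUNNING_ON_MEMORY_TOOL macro replaced by the constexpr kRunningOnMemoryTool, tool-only work becomes an ordinary `if` that the compiler folds away in regular builds. A sketch of the resulting call-site pattern, using only names defined in this header:

    #include "base/memory_tool.h"

    // In regular builds the constant is false, the branch folds away, and the
    // no-op macro expansion discards its arguments.
    void PoisonIfUnderTool(void* p, size_t s) {
      if (art::kRunningOnMemoryTool) {
        MEMORY_TOOL_MAKE_NOACCESS(p, s);
      }
    }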
diff --git a/libartbase/base/scoped_arena_containers.h b/libartbase/base/scoped_arena_containers.h
index 4193981..679bcc0 100644
--- a/libartbase/base/scoped_arena_containers.h
+++ b/libartbase/base/scoped_arena_containers.h
@@ -228,7 +228,7 @@
  protected:
   // Used for variable sized objects such as RegisterLine.
   ALWAYS_INLINE void ProtectMemory(T* ptr, size_t size) const {
-    if (RUNNING_ON_MEMORY_TOOL > 0) {
+    if (kRunningOnMemoryTool) {
      // Writing to the memory will fail if we already destroyed the pointer with
      // DestroyOnlyDelete since we made it no-access.
       memset(ptr, kMagicFill, size);
diff --git a/libdexfile/dex/dex_file_tracking_registrar.cc b/libdexfile/dex/dex_file_tracking_registrar.cc
index 78ea9c1..551bea1 100644
--- a/libdexfile/dex/dex_file_tracking_registrar.cc
+++ b/libdexfile/dex/dex_file_tracking_registrar.cc
@@ -130,7 +130,8 @@
     MEMORY_TOOL_MAKE_NOACCESS(begin, size);
   } else {
     // Note: MEMORY_TOOL_MAKE_UNDEFINED has the same functionality as
-    // MEMORY_TOOL_MAKE_DEFINED under Address Sanitizer. The difference has not been tested with Valgrind
+    // MEMORY_TOOL_MAKE_DEFINED under Address Sanitizer.
+    // Historical note: The difference has not been tested with Valgrind.
     MEMORY_TOOL_MAKE_DEFINED(begin, size);
   }
 }
diff --git a/libdexfile/dex/dex_instruction-inl.h b/libdexfile/dex/dex_instruction-inl.h
index 6bef18c..e0cffdd 100644
--- a/libdexfile/dex/dex_instruction-inl.h
+++ b/libdexfile/dex/dex_instruction-inl.h
@@ -508,7 +508,7 @@
   return (FormatOf(Opcode()) == k35c) || (FormatOf(Opcode()) == k45cc);
 }
 
-inline void Instruction::GetVarArgs(uint32_t arg[kMaxVarArgRegs], uint16_t inst_data) const {
+inline uint32_t Instruction::GetVarArgs(uint32_t arg[kMaxVarArgRegs], uint16_t inst_data) const {
   DCHECK(HasVarArgs());
 
   /*
@@ -551,6 +551,7 @@
     default:  // case 0
       break;  // Valid, but no need to do anything.
   }
+  return count;
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index bf50836..6807025 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -462,8 +462,8 @@
 
   // Fills the given array with the 'arg' array of the instruction and
   // returns the number of entries filled.
   bool HasVarArgs() const;
-  void GetVarArgs(uint32_t args[kMaxVarArgRegs], uint16_t inst_data) const;
-  void GetVarArgs(uint32_t args[kMaxVarArgRegs]) const {
+  uint32_t GetVarArgs(uint32_t args[kMaxVarArgRegs], uint16_t inst_data) const;
+  uint32_t GetVarArgs(uint32_t args[kMaxVarArgRegs]) const {
     return GetVarArgs(args, Fetch16(0));
   }
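With the added return value, a caller can fetch the operand registers and their count in one call. A hedged usage sketch (caller code, not part of this change):

    // Decode the register operands of a 35c/45cc format instruction.
    uint32_t args[Instruction::kMaxVarArgRegs];
    uint32_t count = inst->GetVarArgs(args);  // returns how many entries are valid
    for (uint32_t i = 0; i != count; ++i) {
      UseRegister(args[i]);  // hypothetical consumer
    }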
 
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 44050ff..5c20efa 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -753,7 +753,7 @@
       kByteKindQuickMethodHeader,
       kByteKindCodeInfoLocationCatalog,
       kByteKindCodeInfoDexRegisterMap,
-      kByteKindCodeInfoEncoding,
+      kByteKindCodeInfo,
       kByteKindCodeInfoInvokeInfo,
       kByteKindCodeInfoStackMasks,
       kByteKindCodeInfoRegisterMasks,
@@ -800,7 +800,7 @@
       if (sum > 0) {
         Dump(os, "Code                            ", bits[kByteKindCode], sum);
         Dump(os, "QuickMethodHeader               ", bits[kByteKindQuickMethodHeader], sum);
-        Dump(os, "CodeInfoEncoding                ", bits[kByteKindCodeInfoEncoding], sum);
+        Dump(os, "CodeInfo                        ", bits[kByteKindCodeInfo], sum);
         Dump(os, "CodeInfoLocationCatalog         ", bits[kByteKindCodeInfoLocationCatalog], sum);
         Dump(os, "CodeInfoDexRegisterMap          ", bits[kByteKindCodeInfoDexRegisterMap], sum);
         Dump(os, "CodeInfoStackMasks              ", bits[kByteKindCodeInfoStackMasks], sum);
@@ -819,7 +819,7 @@
                stack_map_bits,
                "stack map");
           Dump(os,
-               "StackMapDexPcEncoding         ",
+               "StackMapDexPc                 ",
                bits[kByteKindStackMapDexPc],
                stack_map_bits,
                "stack map");
@@ -1732,8 +1732,7 @@
    public:
     explicit StackMapsHelper(const uint8_t* raw_code_info, InstructionSet instruction_set)
         : code_info_(raw_code_info),
-          encoding_(code_info_.ExtractEncoding()),
-          number_of_stack_maps_(code_info_.GetNumberOfStackMaps(encoding_)),
+          number_of_stack_maps_(code_info_.GetNumberOfStackMaps()),
           indexes_(),
           offset_(static_cast<uint32_t>(-1)),
           stack_map_index_(0u),
@@ -1741,11 +1740,11 @@
       if (number_of_stack_maps_ != 0u) {
         // Check if native PCs are ordered.
         bool ordered = true;
-        StackMap last = code_info_.GetStackMapAt(0u, encoding_);
+        StackMap last = code_info_.GetStackMapAt(0u);
         for (size_t i = 1; i != number_of_stack_maps_; ++i) {
-          StackMap current = code_info_.GetStackMapAt(i, encoding_);
-          if (last.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set) >
-              current.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set)) {
+          StackMap current = code_info_.GetStackMapAt(i);
+          if (last.GetNativePcOffset(instruction_set) >
+              current.GetNativePcOffset(instruction_set)) {
             ordered = false;
             break;
           }
@@ -1760,18 +1759,15 @@
           std::sort(indexes_.begin(),
                     indexes_.end(),
                     [this](size_t lhs, size_t rhs) {
-                      StackMap left = code_info_.GetStackMapAt(lhs, encoding_);
-                      uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map.encoding,
-                                                                instruction_set_);
-                      StackMap right = code_info_.GetStackMapAt(rhs, encoding_);
-                      uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map.encoding,
-                                                                  instruction_set_);
+                      StackMap left = code_info_.GetStackMapAt(lhs);
+                      uint32_t left_pc = left.GetNativePcOffset(instruction_set_);
+                      StackMap right = code_info_.GetStackMapAt(rhs);
+                      uint32_t right_pc = right.GetNativePcOffset(instruction_set_);
                       // If the PCs are the same, compare indexes to preserve the original order.
                       return (left_pc < right_pc) || (left_pc == right_pc && lhs < rhs);
                     });
         }
-        offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map.encoding,
-                                                     instruction_set_);
+        offset_ = GetStackMapAt(0).GetNativePcOffset(instruction_set_);
       }
     }
 
@@ -1779,10 +1775,6 @@
       return code_info_;
     }
 
-    const CodeInfoEncoding& GetEncoding() const {
-      return encoding_;
-    }
-
     uint32_t GetOffset() const {
       return offset_;
     }
@@ -1795,8 +1787,7 @@
       ++stack_map_index_;
       offset_ = (stack_map_index_ == number_of_stack_maps_)
           ? static_cast<uint32_t>(-1)
-          : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map.encoding,
-                                                              instruction_set_);
+          : GetStackMapAt(stack_map_index_).GetNativePcOffset(instruction_set_);
     }
 
    private:
@@ -1805,11 +1796,10 @@
         i = indexes_[i];
       }
       DCHECK_LT(i, number_of_stack_maps_);
-      return code_info_.GetStackMapAt(i, encoding_);
+      return code_info_.GetStackMapAt(i);
     }
 
     const CodeInfo code_info_;
-    const CodeInfoEncoding encoding_;
     const size_t number_of_stack_maps_;
     dchecked_vector<size_t> indexes_;  // Used if stack map native PCs are not ordered.
     uint32_t offset_;
@@ -1835,79 +1825,75 @@
       StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
       MethodInfo method_info(oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo());
       {
-        CodeInfoEncoding encoding(helper.GetEncoding());
-        StackMapEncoding stack_map_encoding(encoding.stack_map.encoding);
-        const size_t num_stack_maps = encoding.stack_map.num_entries;
-        if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfoEncoding,
-                                   encoding.HeaderSize() * kBitsPerByte,
+        const CodeInfo code_info = helper.GetCodeInfo();
+        const BitTable<StackMap::kCount>& stack_maps = code_info.stack_maps_;
+        const size_t num_stack_maps = stack_maps.NumRows();
+        if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfo,
+                                   code_info.size_ * kBitsPerByte,
                                    oat_method.GetVmapTable())) {
           // Stack maps
           stats_.AddBits(
               Stats::kByteKindStackMapNativePc,
-              stack_map_encoding.GetNativePcEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kNativePcOffset) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapDexPc,
-              stack_map_encoding.GetDexPcEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kDexPc) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapDexRegisterMap,
-              stack_map_encoding.GetDexRegisterMapEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kDexRegisterMapOffset) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapInlineInfoIndex,
-              stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kInlineInfoIndex) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapRegisterMaskIndex,
-              stack_map_encoding.GetRegisterMaskIndexEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kRegisterMaskIndex) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapStackMaskIndex,
-              stack_map_encoding.GetStackMaskIndexEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kStackMaskIndex) * num_stack_maps);
 
           // Stack masks
           stats_.AddBits(
               Stats::kByteKindCodeInfoStackMasks,
-              encoding.stack_mask.encoding.BitSize() * encoding.stack_mask.num_entries);
+              code_info.stack_masks_.size_in_bits());
 
           // Register masks
           stats_.AddBits(
               Stats::kByteKindCodeInfoRegisterMasks,
-              encoding.register_mask.encoding.BitSize() * encoding.register_mask.num_entries);
+              code_info.register_masks_.DataBitSize());
 
           // Invoke infos
-          if (encoding.invoke_info.num_entries > 0u) {
-            stats_.AddBits(
-                Stats::kByteKindCodeInfoInvokeInfo,
-                encoding.invoke_info.encoding.BitSize() * encoding.invoke_info.num_entries);
-          }
+          stats_.AddBits(
+              Stats::kByteKindCodeInfoInvokeInfo,
+              code_info.invoke_infos_.DataBitSize());
 
           // Location catalog
           const size_t location_catalog_bytes =
-              helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding);
+              helper.GetCodeInfo().GetDexRegisterLocationCatalogSize();
           stats_.AddBits(Stats::kByteKindCodeInfoLocationCatalog,
                          kBitsPerByte * location_catalog_bytes);
           // Dex register bytes.
           const size_t dex_register_bytes =
-              helper.GetCodeInfo().GetDexRegisterMapsSize(encoding,
-                                                          code_item_accessor.RegistersSize());
+              helper.GetCodeInfo().GetDexRegisterMapsSize(code_item_accessor.RegistersSize());
           stats_.AddBits(
               Stats::kByteKindCodeInfoDexRegisterMap,
               kBitsPerByte * dex_register_bytes);
 
           // Inline infos.
-          const size_t num_inline_infos = encoding.inline_info.num_entries;
+          const BitTable<InlineInfo::kCount>& inline_infos = code_info.inline_infos_;
+          const size_t num_inline_infos = inline_infos.NumRows();
           if (num_inline_infos > 0u) {
             stats_.AddBits(
                 Stats::kByteKindInlineInfoMethodIndexIdx,
-                encoding.inline_info.encoding.GetMethodIndexIdxEncoding().BitSize() *
-                    num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kMethodIndexIdx) * num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoDexPc,
-                encoding.inline_info.encoding.GetDexPcEncoding().BitSize() * num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kDexPc) * num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoExtraData,
-                encoding.inline_info.encoding.GetExtraDataEncoding().BitSize() * num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kExtraData) * num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoDexRegisterMap,
-                encoding.inline_info.encoding.GetDexRegisterMapEncoding().BitSize() *
-                    num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kDexRegisterMapOffset) * num_inline_infos);
             stats_.AddBits(Stats::kByteKindInlineInfoIsLast, num_inline_infos);
           }
         }
@@ -1922,7 +1908,6 @@
           DCHECK(stack_map.IsValid());
           stack_map.Dump(vios,
                          helper.GetCodeInfo(),
-                         helper.GetEncoding(),
                          method_info,
                          oat_method.GetCodeOffset(),
                          code_item_accessor.RegistersSize(),
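The rewritten statistics derive each category's size as rows times the per-column bit width of the new BitTable representation. A generic sketch of that accounting, using the NumRows()/NumColumnBits() accessors seen above:

    // Bits occupied by one column of a BitTable, as tallied for stack maps.
    template <size_t kNumColumns>
    size_t ColumnBits(const BitTable<kNumColumns>& table, size_t column) {
      return table.NumColumnBits(column) * table.NumRows();
    }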
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index b85730d..bbe89ca 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -162,7 +162,6 @@
         // Code and dex code do not show up if list only.
         expected_prefixes.push_back("DEX CODE:");
         expected_prefixes.push_back("CODE:");
-        expected_prefixes.push_back("CodeInfoEncoding");
         expected_prefixes.push_back("CodeInfoInlineInfo");
       }
       if (mode == kModeArt) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index dc9d990..0889a8e 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -610,7 +610,7 @@
     }
   }
 
-  if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+  if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
     // We want to just exit on non-debug builds, not bringing the runtime down
     // in an orderly fashion. So release the following fields.
     runtime.release();
@@ -690,7 +690,7 @@
     }
   }
 
-  if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+  if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
     // We want to just exit on non-debug builds, not bringing the runtime down
     // in an orderly fashion. So release the following fields.
     runtime.release();
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 1ba4070..d4ceede 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -18,6 +18,7 @@
 
 #include "art_method-inl.h"
 #include "base/callee_save_type.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "common_runtime_test.h"
 #include "quick/quick_method_frame_info.h"
 
@@ -57,21 +58,6 @@
   void FinalizeSetup() OVERRIDE {
     ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
   }
-
-  static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
-      NO_THREAD_SAFETY_ANALYSIS {
-    Runtime* const runtime = Runtime::Current();
-    Thread* const self = Thread::Current();
-    ScopedObjectAccess soa(self);  // So we can create callee-save methods.
-
-    runtime->SetInstructionSet(isa);
-    ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
-    runtime->SetCalleeSaveMethod(save_method, type);
-    QuickMethodFrameInfo frame_info =  runtime->GetRuntimeMethodFrameInfo(save_method);
-    EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
-        << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
-        << frame_info.FpSpillMask() << std::dec;
-  }
 };
 
 TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
@@ -205,26 +191,20 @@
 }  // namespace x86_64
 
 // Check architecture specific constants are sound.
-#define TEST_ARCH(Arch, arch)                                       \
-  TEST_F(ArchTest, Arch) {                                          \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveAllCalleeSaves,             \
-                   arch::kFrameSizeSaveAllCalleeSaves);             \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveRefsOnly,                   \
-                   arch::kFrameSizeSaveRefsOnly);                   \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveRefsAndArgs,                \
-                   arch::kFrameSizeSaveRefsAndArgs);                \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveEverything,                 \
-                   arch::kFrameSizeSaveEverything);                 \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveEverythingForClinit,        \
-                   arch::kFrameSizeSaveEverythingForClinit);        \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveEverythingForSuspendCheck,  \
-                   arch::kFrameSizeSaveEverythingForSuspendCheck);  \
+// We expect the return PC to be stored at the highest address slot in the frame.
+#define TEST_ARCH_TYPE(Arch, arch, type)                                              \
+  EXPECT_EQ(arch::Arch##CalleeSaveFrame::GetFrameSize(CalleeSaveType::k##type),       \
+            arch::kFrameSize##type);                                                  \
+  EXPECT_EQ(arch::Arch##CalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::k##type),  \
+            arch::kFrameSize##type - static_cast<size_t>(k##Arch##PointerSize))
+#define TEST_ARCH(Arch, arch)                                   \
+  TEST_F(ArchTest, Arch) {                                      \
+    TEST_ARCH_TYPE(Arch, arch, SaveAllCalleeSaves);             \
+    TEST_ARCH_TYPE(Arch, arch, SaveRefsOnly);                   \
+    TEST_ARCH_TYPE(Arch, arch, SaveRefsAndArgs);                \
+    TEST_ARCH_TYPE(Arch, arch, SaveEverything);                 \
+    TEST_ARCH_TYPE(Arch, arch, SaveEverythingForClinit);        \
+    TEST_ARCH_TYPE(Arch, arch, SaveEverythingForSuspendCheck);  \
   }
 TEST_ARCH(Arm, arm)
 TEST_ARCH(Arm64, arm64)
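Because the new helpers are constexpr, the same invariant could in principle be expressed at compile time; a sketch against the arm helpers introduced below (not how the test is written):

    // Compile-time variant of one TEST_ARCH_TYPE expectation (sketch only).
    static_assert(art::arm::ArmCalleeSaveFrame::GetReturnPcOffset(
                      art::CalleeSaveType::kSaveRefsOnly) ==
                  art::arm::ArmCalleeSaveFrame::GetFrameSize(
                      art::CalleeSaveType::kSaveRefsOnly) -
                  static_cast<size_t>(art::kArmPointerSize),
                  "return PC occupies the highest address slot in the frame");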
diff --git a/runtime/arch/arm/callee_save_frame_arm.h b/runtime/arch/arm/callee_save_frame_arm.h
new file mode 100644
index 0000000..11eefb9
--- /dev/null
+++ b/runtime/arch/arm/callee_save_frame_arm.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_arm.h"
+
+namespace art {
+namespace arm {
+
+static constexpr uint32_t kArmCalleeSaveAlwaysSpills =
+    (1 << art::arm::LR);
+static constexpr uint32_t kArmCalleeSaveRefSpills =
+    (1 << art::arm::R5) | (1 << art::arm::R6)  | (1 << art::arm::R7) | (1 << art::arm::R8) |
+    (1 << art::arm::R10) | (1 << art::arm::R11);
+static constexpr uint32_t kArmCalleeSaveArgSpills =
+    (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
+static constexpr uint32_t kArmCalleeSaveAllSpills =
+    (1 << art::arm::R4) | (1 << art::arm::R9);
+static constexpr uint32_t kArmCalleeSaveEverythingSpills =
+    (1 << art::arm::R0) | (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3) |
+    (1 << art::arm::R4) | (1 << art::arm::R9) | (1 << art::arm::R12);
+
+static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
+static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
+static constexpr uint32_t kArmCalleeSaveFpArgSpills =
+    (1 << art::arm::S0)  | (1 << art::arm::S1)  | (1 << art::arm::S2)  | (1 << art::arm::S3)  |
+    (1 << art::arm::S4)  | (1 << art::arm::S5)  | (1 << art::arm::S6)  | (1 << art::arm::S7)  |
+    (1 << art::arm::S8)  | (1 << art::arm::S9)  | (1 << art::arm::S10) | (1 << art::arm::S11) |
+    (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15);
+static constexpr uint32_t kArmCalleeSaveFpAllSpills =
+    (1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
+    (1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
+    (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
+    (1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
+static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
+    kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
+
+class ArmCalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
+  }
+
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kArmPointerSize);
+  }
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
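A worked check of GetFrameSize against the sizes the assembly below asserts (assuming kStackAlignment == 16, as elsewhere in ART): kSaveRefsOnly spills LR plus six reference registers and no FP registers, so (7 + 0 + 1 Method*) * 4 = 32 = 28 + 4; kSaveAllCalleeSaves adds R4/R9 and S16-S31, so (9 + 16 + 1) * 4 = 104, rounded up to 112 = 36 + 64 + 12.

    // Sanity checks that follow from the constexpr definitions above
    // (in a translation unit that includes callee_save_frame_arm.h).
    static_assert(art::arm::ArmCalleeSaveFrame::GetFrameSize(
                      art::CalleeSaveType::kSaveRefsOnly) == 32, "28 + 4");
    static_assert(art::arm::ArmCalleeSaveFrame::GetFrameSize(
                      art::CalleeSaveType::kSaveAllCalleeSaves) == 112, "36 + 64 + 12");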
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index cd00125..311e838 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -55,7 +55,7 @@
     @ Load kSaveAllCalleeSaves Method* into rTemp.
     ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
     str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 
      // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
@@ -86,7 +86,7 @@
     @ Load kSaveRefsOnly Method* into rTemp.
     ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
     str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_REFS_ONLY != 28 + 4)
@@ -147,13 +147,13 @@
     @ Load kSaveRefsAndArgs Method* into rTemp.
     ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
     str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 .endm
 
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
     SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
     str r0, [sp, #0]                              @ Store ArtMethod* to bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 .endm
 
 .macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
@@ -193,7 +193,7 @@
     @ Load kSaveEverything Method* into rTemp.
     ldr \rTemp, [\rTemp, #\runtime_method_offset]
     str \rTemp, [sp, #0]                @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8)
@@ -301,7 +301,7 @@
      * exception is Thread::Current()->exception_ when the runtime method frame is ready.
      */
 .macro DELIVER_PENDING_EXCEPTION_FRAME_READY
-    mov    r0, r9                              @ pass Thread::Current
+    mov    r0, rSELF                           @ pass Thread::Current
     bl     artDeliverPendingExceptionFromCode  @ artDeliverPendingExceptionFromCode(Thread*)
 .endm
 
@@ -318,7 +318,7 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0       @ save all registers as basis for long jump context
-    mov r0, r9                      @ pass Thread::Current
+    mov r0, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
@@ -327,7 +327,7 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_EVERYTHING_FRAME r0  @ save all registers as basis for long jump context
-    mov r0, r9                      @ pass Thread::Current
+    mov r0, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
@@ -336,7 +336,7 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1       @ save all registers as basis for long jump context
-    mov r1, r9                      @ pass Thread::Current
+    mov r1, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
@@ -345,13 +345,13 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_EVERYTHING_FRAME r2  @ save all registers as basis for long jump context
-    mov r2, r9                      @ pass Thread::Current
+    mov r2, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
 
 .macro  RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
-    ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET]   // Get exception field.
+    ldr \reg, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ Get exception field.
     cbnz \reg, 1f
     bx lr
 1:
@@ -377,7 +377,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r1        @ save callee saves in case of GC
-    mov    r1, r9                        @ pass Thread::Current
+    mov    r1, rSELF                     @ pass Thread::Current
     bl     \entrypoint                   @ (uint32_t field_idx, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -389,7 +389,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r2        @ save callee saves in case of GC
-    mov    r2, r9                        @ pass Thread::Current
+    mov    r2, rSELF                     @ pass Thread::Current
     bl     \entrypoint                   @ (field_idx, Object*, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -401,7 +401,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r3        @ save callee saves in case of GC
-    mov    r3, r9                        @ pass Thread::Current
+    mov    r3, rSELF                     @ pass Thread::Current
     bl     \entrypoint                   @ (field_idx, Object*, new_val, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME         @ TODO: we can clearly save an add here
     REFRESH_MARKING_REGISTER
@@ -448,7 +448,7 @@
     @ save all registers as basis for long jump context
     SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1
     mov r0, lr                      @ pass the fault address stored in LR by the fault handler.
-    mov r1, r9                      @ pass Thread::Current
+    mov r1, rSELF                   @ pass Thread::Current
     bl  artThrowNullPointerExceptionFromSignal  @ (Thread*)
 END art_quick_throw_null_pointer_exception_from_signal
 
@@ -494,7 +494,7 @@
 .macro INVOKE_TRAMPOLINE_BODY cxx_name
     .extern \cxx_name
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2     @ save callee saves in case allocation triggers GC
-    mov    r2, r9                         @ pass Thread::Current
+    mov    r2, rSELF                      @ pass Thread::Current
     mov    r3, sp
     bl     \cxx_name                      @ (method_idx, this, Thread*, SP)
     mov    r12, r1                        @ save Method*->code_
@@ -682,50 +682,48 @@
      */
     .extern artLockObjectFromCode
 ENTRY art_quick_lock_object
+    ldr    r1, [rSELF, #THREAD_ID_OFFSET]
     cbz    r0, .Lslow_lock
 .Lretry_lock:
-    ldr    r2, [r9, #THREAD_ID_OFFSET]
-    ldrex  r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
-    mov    r3, r1
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits
-    cbnz   r3, .Lnot_unlocked         @ already thin locked
-    @ unlocked case - r1: original lock word that's zero except for the read barrier bits.
-    orr    r2, r1, r2                 @ r2 holds thread id with count of 0 with preserved read barrier bits
-    strex  r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
-    cbnz   r3, .Llock_strex_fail      @ store failed, retry
-    dmb    ish                        @ full (LoadLoad|LoadStore) memory barrier
+    ldrex  r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    eor    r3, r2, r1                 @ Prepare the value to store if unlocked
+                                      @   (thread id, count of 0 and preserved read barrier bits),
+                                      @ or prepare to compare thread id for recursive lock check
+                                      @   (lock_word.ThreadId() ^ self->ThreadId()).
+    ands   ip, r2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ Test the non-gc bits.
+    bne    .Lnot_unlocked             @ Check if unlocked.
+    @ unlocked case - store r3: original lock word plus thread id, preserved read barrier bits.
+    strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    cbnz   r2, .Llock_strex_fail      @ If store failed, retry.
+    dmb    ish                        @ Full (LoadLoad|LoadStore) memory barrier.
     bx lr
-.Lnot_unlocked:  @ r1: original lock word, r2: thread_id with count of 0 and zero read barrier bits
-    lsr    r3, r1, LOCK_WORD_STATE_SHIFT
-    cbnz   r3, .Lslow_lock            @ if either of the top two bits are set, go slow path
-    eor    r2, r1, r2                 @ lock_word.ThreadId() ^ self->ThreadId()
-    uxth   r2, r2                     @ zero top 16 bits
-    cbnz   r2, .Lslow_lock            @ lock word and self thread id's match -> recursive lock
-                                      @ else contention, go to slow path
-    mov    r3, r1                     @ copy the lock word to check count overflow.
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits.
-    add    r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ increment count in lock word placing in r2 to check overflow
-    lsr    r3, r2, #LOCK_WORD_GC_STATE_SHIFT    @ if the first gc state bit is set, we overflowed.
-    cbnz   r3, .Lslow_lock            @ if we overflow the count go slow path
-    add    r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ increment count for real
-    strex  r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
-    cbnz   r3, .Llock_strex_fail      @ strex failed, retry
+.Lnot_unlocked:  @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+                                      @ Check lock word state and thread id together,
+    bfc    r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+    cbnz   r3, .Lslow_lock            @ if either of the top two bits are set, or the lock word's
+                                      @ thread id did not match, go slow path.
+    add    r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ Increment the recursive lock count.
+                                      @ Extract the new thin lock count for overflow check.
+    ubfx   r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+    cbz    r2, .Lslow_lock            @ Zero as the new count indicates overflow, go slow path.
+    strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits.
+    cbnz   r2, .Llock_strex_fail      @ If strex failed, retry.
     bx lr
 .Llock_strex_fail:
     b      .Lretry_lock               @ retry
-.Lslow_lock:
-    SETUP_SAVE_REFS_ONLY_FRAME r1     @ save callee saves in case we block
-    mov    r1, r9                     @ pass Thread::Current
-    bl     artLockObjectFromCode      @ (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually in art_quick_lock_object_no_inline (a tail call via .Lslow_lock).
 END art_quick_lock_object
 
 ENTRY art_quick_lock_object_no_inline
+    // This is also the slow path for art_quick_lock_object. Note that we
+    // need a local label: the assembler complains about the target being
+    // out of range if we try to jump to `art_quick_lock_object_no_inline`.
+.Lslow_lock:
     SETUP_SAVE_REFS_ONLY_FRAME r1     @ save callee saves in case we block
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     artLockObjectFromCode      @ (Object* obj, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
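The reworked fast path folds the state check and the owner check into two instructions: XOR the lock word with the current thread id, then bfc-clear the thin-lock count and gc-state bit-field; a zero result means the state bits are clear and the owner matches, i.e. a recursive thin lock (the unlock path below uses the same trick). A C++ sketch of that combined test (bit-field positions are placeholders for the LOCK_WORD_* constants):

    #include <cstdint>

    // Combined check performed by the eor/bfc pair in .Lnot_unlocked above.
    bool IsRecursiveThinLockBySelf(uint32_t lock_word, uint32_t self_tid,
                                   uint32_t count_shift, uint32_t count_plus_gc_bits) {
      uint32_t x = lock_word ^ self_tid;                        // eor r3, r2, r1
      x &= ~(((1u << count_plus_gc_bits) - 1) << count_shift);  // bfc r3, #shift, #width
      return x == 0;  // zero: top state bits clear and owner tid == self_tid
    }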
@@ -739,62 +737,59 @@
      */
     .extern artUnlockObjectFromCode
 ENTRY art_quick_unlock_object
+    ldr    r1, [rSELF, #THREAD_ID_OFFSET]
     cbz    r0, .Lslow_unlock
 .Lretry_unlock:
 #ifndef USE_READ_BARRIER
-    ldr    r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    ldr    r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #else
-    ldrex  r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ Need to use atomic instructions for read barrier
+                                      @ Need to use atomic instructions for read barrier.
+    ldrex  r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #endif
-    lsr    r2, r1, #LOCK_WORD_STATE_SHIFT
-    cbnz   r2, .Lslow_unlock          @ if either of the top two bits are set, go slow path
-    ldr    r2, [r9, #THREAD_ID_OFFSET]
-    mov    r3, r1                     @ copy lock word to check thread id equality
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits
-    eor    r3, r3, r2                 @ lock_word.ThreadId() ^ self->ThreadId()
-    uxth   r3, r3                     @ zero top 16 bits
-    cbnz   r3, .Lslow_unlock          @ do lock word and self thread id's match?
-    mov    r3, r1                     @ copy lock word to detect transition to unlocked
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits
-    cmp    r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
-    bpl    .Lrecursive_thin_unlock
-    @ transition to unlocked
-    mov    r3, r1
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED  @ r3: zero except for the preserved gc bits
-    dmb    ish                        @ full (LoadStore|StoreStore) memory barrier
+    eor    r3, r2, r1                 @ Prepare the value to store if simply locked
+                                      @   (mostly 0s, and preserved read barrier bits),
+                                      @ or prepare to compare thread id for recursive lock check
+                                      @   (lock_word.ThreadId() ^ self->ThreadId()).
+    ands   ip, r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ Test the non-gc bits.
+    bne    .Lnot_simply_locked        @ Locked recursively or by other thread?
+    @ Transition to unlocked.
+    dmb    ish                        @ Full (LoadStore|StoreStore) memory barrier.
 #ifndef USE_READ_BARRIER
     str    r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #else
     strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits
-    cbnz   r2, .Lunlock_strex_fail    @ store failed, retry
+    cbnz   r2, .Lunlock_strex_fail    @ If the store failed, retry.
 #endif
     bx     lr
-.Lrecursive_thin_unlock:  @ r1: original lock word
-    sub    r1, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ decrement count
+.Lnot_simply_locked:  @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+                                      @ Check lock word state and thread id together,
+    bfc    r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+    cbnz   r3, .Lslow_unlock          @ if either of the top two bits are set, or the lock word's
+                                      @ thread id did not match, go slow path.
+    sub    r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ Decrement recursive lock count.
 #ifndef USE_READ_BARRIER
-    str    r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    str    r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #else
-    strex  r2, r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits
-    cbnz   r2, .Lunlock_strex_fail    @ store failed, retry
+    strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits.
+    cbnz   r2, .Lunlock_strex_fail    @ If the store failed, retry.
 #endif
     bx     lr
 .Lunlock_strex_fail:
     b      .Lretry_unlock             @ retry
-.Lslow_unlock:
-    @ save callee saves in case exception allocation triggers GC
-    SETUP_SAVE_REFS_ONLY_FRAME r1
-    mov    r1, r9                     @ pass Thread::Current
-    bl     artUnlockObjectFromCode    @ (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually in art_quick_unlock_object_no_inline (a tail call via .Lslow_unlock).
 END art_quick_unlock_object
 
 ENTRY art_quick_unlock_object_no_inline
+    // This is also the slow path for art_quick_unlock_object. Note that we
+    // need a local label: the assembler complains about the target being
+    // out of range if we try to jump to `art_quick_unlock_object_no_inline`.
+.Lslow_unlock:
     @ save callee saves in case exception allocation triggers GC
     SETUP_SAVE_REFS_ONLY_FRAME r1
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     artUnlockObjectFromCode    @ (Object* obj, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -832,7 +827,7 @@
 
 .Lthrow_class_cast_exception_for_bitstring_check:
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2       @ save all registers as basis for long jump context
-    mov r2, r9                      @ pass Thread::Current
+    mov r2, rSELF                   @ pass Thread::Current
     bl  artThrowClassCastExceptionForObject  @ (Object*, Class*, Thread*)
     bkpt
 END art_quick_check_instance_of
@@ -917,7 +912,7 @@
     add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
     POISON_HEAP_REF r2
     str r2, [r3, r1, lsl #2]
-    ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+    ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
     lsr r0, r0, #CARD_TABLE_CARD_SHIFT
     strb r3, [r3, r0]
     blx lr
@@ -945,7 +940,7 @@
     add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
     POISON_HEAP_REF r2
     str r2, [r3, r1, lsl #2]
-    ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+    ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
     lsr r0, r0, #CARD_TABLE_CARD_SHIFT
     strb r3, [r3, r0]
     blx lr
@@ -954,7 +949,7 @@
     /* No need to repeat restore cfi directives, the ones above apply here. */
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
     mov r1, r2
-    mov r2, r9                     @ pass Thread::Current
+    mov r2, rSELF                  @ pass Thread::Current
     bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
     bkpt                           @ unreached
 END art_quick_aput_obj
@@ -964,7 +959,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r1     @ save callee saves in case of GC
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     \entrypoint     @ (uint32_t type_idx, Method* method, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -977,7 +972,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r2     @ save callee saves in case of GC
-    mov    r2, r9                     @ pass Thread::Current
+    mov    r2, rSELF                  @ pass Thread::Current
     bl     \entrypoint     @ (uint32_t type_idx, Method* method, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -990,7 +985,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r3     @ save callee saves in case of GC
-    mov    r3, r9                     @ pass Thread::Current
+    mov    r3, rSELF                  @ pass Thread::Current
     @ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
     bl     \entrypoint
     RESTORE_SAVE_REFS_ONLY_FRAME
@@ -1004,7 +999,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r12    @ save callee saves in case of GC
-    str    r9, [sp, #-16]!            @ expand the frame and pass Thread::Current
+    str    rSELF, [sp, #-16]!         @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
     bl     \entrypoint
     add    sp, #16                    @ strip the extra frame
@@ -1023,7 +1018,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_EVERYTHING_FRAME r1, \runtime_method_offset    @ save everything in case of GC
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     \entrypoint                @ (uint32_t index, Thread*)
     cbz    r0, 1f                     @ If result is null, deliver the OOME.
     .cfi_remember_state
@@ -1065,9 +1060,9 @@
     .extern artGet64StaticFromCompiledCode
 ENTRY art_quick_get64_static
     SETUP_SAVE_REFS_ONLY_FRAME r2        @ save callee saves in case of GC
-    mov    r1, r9                        @ pass Thread::Current
-    bl     artGet64StaticFromCompiledCode        @ (uint32_t field_idx, Thread*)
-    ldr    r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    mov    r1, rSELF                     @ pass Thread::Current
+    bl     artGet64StaticFromCompiledCode  @ (uint32_t field_idx, Thread*)
+    ldr    r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
     cbnz   r2, 1f                        @ deliver the exception if one is pending
@@ -1091,9 +1086,9 @@
     .extern artGet64InstanceFromCompiledCode
 ENTRY art_quick_get64_instance
     SETUP_SAVE_REFS_ONLY_FRAME r2        @ save callee saves in case of GC
-    mov    r2, r9                        @ pass Thread::Current
-    bl     artGet64InstanceFromCompiledCode      @ (field_idx, Object*, Thread*)
-    ldr    r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    mov    r2, rSELF                     @ pass Thread::Current
+    bl     artGet64InstanceFromCompiledCode  @ (field_idx, Object*, Thread*)
+    ldr    r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
     cbnz   r2, 1f                        @ deliver the exception if one is pending
@@ -1125,7 +1120,7 @@
 ENTRY art_quick_set64_instance
     SETUP_SAVE_REFS_ONLY_FRAME r12       @ save callee saves in case of GC
                                          @ r2:r3 contain the wide argument
-    str    r9, [sp, #-16]!               @ expand the frame and pass Thread::Current
+    str    rSELF, [sp, #-16]!            @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
     bl     artSet64InstanceFromCompiledCode      @ (field_idx, Object*, new_val, Thread*)
     add    sp, #16                       @ release out args
@@ -1140,7 +1135,7 @@
 ENTRY art_quick_set64_static
     SETUP_SAVE_REFS_ONLY_FRAME r12        @ save callee saves in case of GC
                                           @ r2:r3 contain the wide argument
-    str    r9, [sp, #-16]!                @ expand the frame and pass Thread::Current
+    str    rSELF, [sp, #-16]!             @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
     bl     artSet64StaticFromCompiledCode @ (field_idx, new_val, Thread*)
     add    sp, #16                        @ release out args
@@ -1185,12 +1180,12 @@
 .macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
 ENTRY \c_name
     // Fast path rosalloc allocation.
-    // r0: type/return value, r9: Thread::Current
+    // r0: type/return value, rSELF (r9): Thread::Current
     // r1, r2, r3, r12: free.
-    ldr    r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]     // Check if the thread local
+    ldr    r3, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]  // Check if the thread local
                                                               // allocation stack has room.
                                                               // TODO: consider using ldrd.
-    ldr    r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+    ldr    r12, [rSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
     cmp    r3, r12
     bhs    .Lslow_path\c_name
 
@@ -1208,7 +1203,7 @@
                                                               // from the size. Since the size is
                                                               // already aligned we can combine the
                                                               // two shifts together.
-    add    r12, r9, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+    add    r12, rSELF, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
                                                              // Subtract pointer size since there
                                                              // are no runs for 0 byte allocations
                                                              // and the size is already aligned.
@@ -1236,9 +1231,9 @@
                                                               // local allocation stack and
                                                               // increment the thread local
                                                               // allocation stack top.
-    ldr    r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+    ldr    r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
     str    r3, [r1], #COMPRESSED_REFERENCE_SIZE               // (Increment r1 as a side effect.)
-    str    r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+    str    r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
                                                               // Decrement the size of the free list
 
     // After this "STR" the object is published to the thread local allocation stack,
@@ -1287,7 +1282,7 @@
 
 .Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME r2     @ save callee saves in case of GC
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     \cxx_name                  @ (mirror::Class* cls, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
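The RosAlloc fast path above leans on two thread-local pieces. A hedged C++ sketch
of the allocation-stack half (the struct and field names are illustrative, not the
runtime's API):

    #include <cstdint>

    struct ThreadLike {                    // illustrative layout only
      uint32_t* tlas_top;                  // THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET
      uint32_t* tlas_end;                  // THREAD_LOCAL_ALLOC_STACK_END_OFFSET
    };

    // Mirrors the ldr/cmp/bhs room check and the post-incrementing str that
    // pushes the new object's compressed reference and bumps the stack top.
    bool PushOnThreadLocalAllocStack(ThreadLike* self, uint32_t compressed_ref) {
      if (self->tlas_top >= self->tlas_end) {
        return false;                      // no room: take the slow path
      }
      *self->tlas_top++ = compressed_ref;  // str r3, [r1], #COMPRESSED_REFERENCE_SIZE
      return true;                         // caller then publishes the class pointer
    }

The run lookup is the other half: because bracket sizes are quantum multiples and
run pointers are pointer-sized, the single lsr-and-add turns an aligned size into
a table index off rSELF, combining two shifts as the comments above note.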
@@ -1301,7 +1296,7 @@
 // The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
 // and art_quick_alloc_object_resolved/initialized_region_tlab.
 //
-// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
+// r0: type, rSELF (r9): Thread::Current, r1, r2, r3, r12: free.
 // Need to preserve r0 to the slow path.
 //
 // If isInitialized=1 then the compiler assumes the object's class has already been initialized.
@@ -1313,7 +1308,7 @@
 #if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
 #error "Thread::thread_local_pos/end must be consecutive and 8-byte aligned for performance"
 #endif
-    ldrd   r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
+    ldrd   r12, r3, [rSELF, #THREAD_LOCAL_POS_OFFSET]
     sub    r12, r3, r12                                       // Compute the remaining buf size.
     ldr    r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (r3).
     cmp    r3, r12                                            // Check if it fits.
@@ -1326,9 +1321,9 @@
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
                                                               // Reload old thread_local_pos (r0)
                                                               // for the return value.
-    ldr    r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+    ldr    r2, [rSELF, #THREAD_LOCAL_POS_OFFSET]
     add    r1, r2, r3
-    str    r1, [r9, #THREAD_LOCAL_POS_OFFSET]                 // Store new thread_local_pos.
+    str    r1, [rSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
     // After this "STR" the object is published to the thread local allocation stack,
    // and it will be observable from a runtime-internal (e.g. Heap::VisitObjects) point of view.
     // It is not yet visible to the running (user) compiled code until after the return.
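A compact sketch of the bump-pointer fast path this macro implements, with assumed
field names; the real layout keeps thread_local_pos and thread_local_end adjacent
precisely so the single ldrd can read both:

    #include <cstddef>
    #include <cstdint>

    struct TlabLike {            // illustrative layout only
      uintptr_t pos;             // THREAD_LOCAL_POS_OFFSET
      uintptr_t end;             // THREAD_LOCAL_END_OFFSET (pos + 4 on 32-bit ARM)
      size_t objects;            // THREAD_LOCAL_OBJECTS_OFFSET
    };

    void* TlabAllocFast(TlabLike* t, size_t object_size) {
      if (object_size > t->end - t->pos) {
        return nullptr;          // does not fit: take the slow path
      }
      void* result = reinterpret_cast<void*>(t->pos);  // old pos is the object
      t->pos += object_size;     // store the new thread_local_pos
      ++t->objects;              // the THREAD_LOCAL_OBJECTS increment below
      return result;             // caller stores the class pointer, then fences
    }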
@@ -1346,9 +1341,9 @@
     //
    // (Note: The actual check tests that the object's class pointer is non-null.
     // Also, unlike rosalloc, the object can never be observed as null).
-    ldr    r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]             // Increment thread_local_objects.
+    ldr    r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
     add    r1, r1, #1
-    str    r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+    str    r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
     POISON_HEAP_REF r0
     str    r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
                                                               // Fence. This is "ish" not "ishst" so
@@ -1375,12 +1370,12 @@
 .macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
 ENTRY \name
     // Fast path tlab allocation.
-    // r0: type, r9: Thread::Current
+    // r0: type, rSELF (r9): Thread::Current
     // r1, r2, r3, r12: free.
     ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name, \isInitialized
 .Lslow_path\name:
     SETUP_SAVE_REFS_ONLY_FRAME r2                             // Save callee saves in case of GC.
-    mov    r1, r9                                             // Pass Thread::Current.
+    mov    r1, rSELF                                          // Pass Thread::Current.
     bl     \entrypoint                                        // (mirror::Class* klass, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -1397,7 +1392,7 @@
 // The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
 // and art_quick_alloc_array_resolved/initialized_region_tlab.
 //
-// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// r0: type, r1: component_count, r2: total_size, rSELF (r9): Thread::Current, r3, r12: free.
 // Need to preserve r0 and r1 to the slow path.
 .macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
     and    r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED             // Apply alignment mask
@@ -1409,7 +1404,7 @@
 #if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
 #error "Thread::thread_local_pos/end must be consecutive and 8-byte aligned for performance"
 #endif
-    ldrd   r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+    ldrd   r3, r12, [rSELF, #THREAD_LOCAL_POS_OFFSET]
     sub    r12, r12, r3                                       // Compute the remaining buf size.
     cmp    r2, r12                                            // Check if the total_size fits.
     // The array class is always initialized here. Unlike new-instance,
@@ -1417,10 +1412,10 @@
     bhi    \slowPathLabel
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
     add    r2, r2, r3
-    str    r2, [r9, #THREAD_LOCAL_POS_OFFSET]                 // Store new thread_local_pos.
-    ldr    r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]             // Increment thread_local_objects.
+    str    r2, [rSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
+    ldr    r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
     add    r2, r2, #1
-    str    r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+    str    r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
     POISON_HEAP_REF r0
     str    r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
     str    r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET]              // Store the array length.
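The array variant reuses the same bump allocation; the extra steps are the
alignment mask (the \size_setup macro is assumed to have already added the
rounding slack, so the AND completes a round-up) and publishing the array header.
A sketch under assumed names:

    #include <cstdint>

    constexpr uintptr_t kObjectAlignment = 8;  // assumption for illustration

    // OBJECT_ALIGNMENT_MASK_TOGGLED is ~(kObjectAlignment - 1); with the slack
    // already added, this AND rounds total_size up to the alignment.
    uintptr_t AlignTotalSize(uintptr_t total_size_plus_slack) {
      return total_size_plus_slack & ~(kObjectAlignment - 1);
    }

    struct ArrayHeaderLike {     // illustrative layout only
      uint32_t klass;            // MIRROR_OBJECT_CLASS_OFFSET
      int32_t length;            // MIRROR_ARRAY_LENGTH_OFFSET
    };

    void PublishArray(ArrayHeaderLike* a, uint32_t klass, int32_t count) {
      a->klass = klass;          // str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
      a->length = count;         // str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET]
      // The real code issues a data memory barrier before returning the array.
    }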
@@ -1443,7 +1438,7 @@
     // Fast path array allocation for region tlab allocation.
     // r0: mirror::Class* type
     // r1: int32_t component_count
-    // r9: thread
+    // rSELF (r9): thread
     // r2, r3, r12: free.
     \size_setup .Lslow_path\name
     ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
@@ -1452,7 +1447,7 @@
     // r1: int32_t component_count
     // r2: Thread* self
     SETUP_SAVE_REFS_ONLY_FRAME r2  // save callee saves in case of GC
-    mov    r2, r9                  // pass Thread::Current
+    mov    r2, rSELF               // pass Thread::Current
     bl     \entrypoint
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -1575,10 +1570,10 @@
      .extern artQuickProxyInvokeHandler
 ENTRY art_quick_proxy_invoke_handler
     SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
-    mov     r2, r9                 @ pass Thread::Current
+    mov     r2, rSELF              @ pass Thread::Current
     mov     r3, sp                 @ pass SP
     blx     artQuickProxyInvokeHandler  @ (Method* proxy method, receiver, Thread*, SP)
-    ldr     r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr     r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     // Tear down the callee-save frame. Skip arg registers.
     add     sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
     .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1706,7 +1701,7 @@
     .extern artQuickResolutionTrampoline
 ENTRY art_quick_resolution_trampoline
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2
-    mov     r2, r9                 @ pass Thread::Current
+    mov     r2, rSELF              @ pass Thread::Current
     mov     r3, sp                 @ pass SP
     blx     artQuickResolutionTrampoline  @ (Method* called, receiver, Thread*, SP)
     cbz     r0, 1f                 @ is code pointer null? goto exception
@@ -1780,10 +1775,10 @@
     blx artQuickGenericJniEndTrampoline
 
     // Restore self pointer.
-    mov r9, r11
+    mov rSELF, r11
 
     // Pending exceptions possible.
-    ldr r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     cbnz r2, .Lexception_in_native
 
     // Tear down the alloca.
@@ -1804,7 +1799,7 @@
     .cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
 
 .Lexception_in_native:
-    ldr ip, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+    ldr ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
     add ip, ip, #-1  // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE.
     mov sp, ip
     .cfi_def_cfa_register sp
@@ -1815,10 +1810,10 @@
     .extern artQuickToInterpreterBridge
 ENTRY art_quick_to_interpreter_bridge
     SETUP_SAVE_REFS_AND_ARGS_FRAME r1
-    mov     r1, r9                 @ pass Thread::Current
+    mov     r1, rSELF              @ pass Thread::Current
     mov     r2, sp                 @ pass SP
     blx     artQuickToInterpreterBridge    @ (Method* method, Thread*, SP)
-    ldr     r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr     r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     // Tear down the callee-save frame. Skip arg registers.
     add     sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
     .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1846,7 +1841,7 @@
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2
     @ preserve r0 (not normally an arg) knowing there is a spare slot in kSaveRefsAndArgs.
     str   r0, [sp, #4]
-    mov   r2, r9         @ pass Thread::Current
+    mov   r2, rSELF      @ pass Thread::Current
     mov   r3, sp         @ pass SP
     blx   artInstrumentationMethodEntryFromCode  @ (Method*, Object*, Thread*, SP)
     cbz   r0, .Ldeliver_instrumentation_entry_exception
@@ -1872,7 +1867,7 @@
     add   r3, sp, #8     @ store fpr_res pointer, in kSaveEverything frame
     add   r2, sp, #136   @ store gpr_res pointer, in kSaveEverything frame
     mov   r1, sp         @ pass SP
-    mov   r0, r9         @ pass Thread::Current
+    mov   r0, rSELF      @ pass Thread::Current
     blx   artInstrumentationMethodExitFromCode  @ (Thread*, SP, gpr_res*, fpr_res*)
 
     cbz   r0, .Ldo_deliver_instrumentation_exception
@@ -1901,7 +1896,7 @@
     .extern artDeoptimize
 ENTRY art_quick_deoptimize
     SETUP_SAVE_EVERYTHING_FRAME r0
-    mov    r0, r9         @ pass Thread::Current
+    mov    r0, rSELF      @ pass Thread::Current
     blx    artDeoptimize  @ (Thread*)
 END art_quick_deoptimize
 
@@ -1912,7 +1907,7 @@
     .extern artDeoptimizeFromCompiledCode
 ENTRY art_quick_deoptimize_from_compiled_code
     SETUP_SAVE_EVERYTHING_FRAME r1
-    mov    r1, r9                         @ pass Thread::Current
+    mov    r1, rSELF                      @ pass Thread::Current
     blx    artDeoptimizeFromCompiledCode  @ (DeoptimizationKind, Thread*)
 END art_quick_deoptimize_from_compiled_code
 
@@ -2691,7 +2686,7 @@
 .extern artInvokePolymorphic
 ENTRY art_quick_invoke_polymorphic
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2
-    mov     r2, r9                 @ pass Thread::Current
+    mov     r2, rSELF              @ pass Thread::Current
     mov     r3, sp                 @ pass SP
     mov     r0, #0                 @ initialize 64-bit JValue as zero.
     str     r0, [sp, #-4]!
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
deleted file mode 100644
index 5c5b81b..0000000
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
-#define ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_arm.h"
-
-namespace art {
-namespace arm {
-
-static constexpr uint32_t kArmCalleeSaveAlwaysSpills =
-    (1 << art::arm::LR);
-static constexpr uint32_t kArmCalleeSaveRefSpills =
-    (1 << art::arm::R5) | (1 << art::arm::R6)  | (1 << art::arm::R7) | (1 << art::arm::R8) |
-    (1 << art::arm::R10) | (1 << art::arm::R11);
-static constexpr uint32_t kArmCalleeSaveArgSpills =
-    (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
-static constexpr uint32_t kArmCalleeSaveAllSpills =
-    (1 << art::arm::R4) | (1 << art::arm::R9);
-static constexpr uint32_t kArmCalleeSaveEverythingSpills =
-    (1 << art::arm::R0) | (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3) |
-    (1 << art::arm::R4) | (1 << art::arm::R9) | (1 << art::arm::R12);
-
-static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
-static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
-static constexpr uint32_t kArmCalleeSaveFpArgSpills =
-    (1 << art::arm::S0)  | (1 << art::arm::S1)  | (1 << art::arm::S2)  | (1 << art::arm::S3)  |
-    (1 << art::arm::S4)  | (1 << art::arm::S5)  | (1 << art::arm::S6)  | (1 << art::arm::S7)  |
-    (1 << art::arm::S8)  | (1 << art::arm::S9)  | (1 << art::arm::S10) | (1 << art::arm::S11) |
-    (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15);
-static constexpr uint32_t kArmCalleeSaveFpAllSpills =
-    (1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
-    (1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
-    (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
-    (1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
-static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
-    kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
-
-constexpr uint32_t ArmCalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t ArmCalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t ArmCalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(ArmCalleeSaveFrameSize(type),
-                              ArmCalleeSaveCoreSpills(type),
-                              ArmCalleeSaveFpSpills(type));
-}
-
-constexpr size_t ArmCalleeSaveFpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return ArmCalleeSaveFrameSize(type) -
-         (POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
-          POPCOUNT(ArmCalleeSaveFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
-}
-
-constexpr size_t ArmCalleeSaveGpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return ArmCalleeSaveFrameSize(type) -
-         POPCOUNT(ArmCalleeSaveCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
-}
-
-constexpr size_t ArmCalleeSaveLrOffset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return ArmCalleeSaveFrameSize(type) -
-      POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * static_cast<size_t>(kArmPointerSize);
-}
-
-}  // namespace arm
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/callee_save_frame_arm64.h
similarity index 61%
rename from runtime/arch/arm64/quick_method_frame_info_arm64.h
rename to runtime/arch/arm64/callee_save_frame_arm64.h
index 2d2b500..bc36bfa 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/callee_save_frame_arm64.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
-#define ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#ifndef ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
@@ -79,57 +79,56 @@
     (1 << art::arm64::D27) | (1 << art::arm64::D28) | (1 << art::arm64::D29) |
     (1 << art::arm64::D30) | (1 << art::arm64::D31);
 
-constexpr uint32_t Arm64CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
-}
+class Arm64CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
+  }
 
-constexpr uint32_t Arm64CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
-}
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
+  }
 
-constexpr uint32_t Arm64CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
-}
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
+  }
 
-constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(Arm64CalleeSaveFrameSize(type),
-                              Arm64CalleeSaveCoreSpills(type),
-                              Arm64CalleeSaveFpSpills(type));
-}
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
 
-constexpr size_t Arm64CalleeSaveFpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return Arm64CalleeSaveFrameSize(type) -
-         (POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
-          POPCOUNT(Arm64CalleeSaveFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
-}
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
+  }
 
-constexpr size_t Arm64CalleeSaveGpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return Arm64CalleeSaveFrameSize(type) -
-         POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
-}
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
+  }
 
-constexpr size_t Arm64CalleeSaveLrOffset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return Arm64CalleeSaveFrameSize(type) -
-      POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) *
-      static_cast<size_t>(kArm64PointerSize);
-}
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kArm64PointerSize);
+  }
+};
 
 }  // namespace arm64
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#endif  // ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
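Every accessor on the new Arm64CalleeSaveFrame class is constexpr, so callers get
compile-time frame constants. A hypothetical usage sketch (QuickMethodFrameInfo's
getters are the existing ones this data already feeds):

    #include "arch/arm64/callee_save_frame_arm64.h"

    namespace art {

    // Hypothetical compile-time use of the renamed accessors.
    constexpr QuickMethodFrameInfo kRefsAndArgsInfo =
        arm64::Arm64CalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);

    static_assert(arm64::Arm64CalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs) ==
                      arm64::Arm64CalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs) -
                          static_cast<size_t>(kArm64PointerSize),
                  "the return PC sits in the last pointer-size slot of the frame");

    }  // namespace art

Note the simplification folded into the rename: GetReturnPcOffset drops the old
LrOffset popcount arithmetic and defines the return PC as the frame's top slot,
i.e. frame size minus one pointer, which matches what the old expression computed
since LR is the highest spilled core register.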
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ac5b2b8..14d0cc7 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1151,45 +1151,36 @@
      */
     .extern artLockObjectFromCode
 ENTRY art_quick_lock_object
-    cbz    w0, .Lslow_lock
-    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
+    ldr    w1, [xSELF, #THREAD_ID_OFFSET]
+    cbz    w0, art_quick_lock_object_no_inline
+                                      // Exclusive load/store has no immediate anymore.
+    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
 .Lretry_lock:
-    ldr    w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
-    ldaxr  w1, [x4]                   // acquire needed only in most common case
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
-    cbnz   w3, .Lnot_unlocked         // already thin locked
-    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
-    orr    x2, x1, x2                 // x2 holds thread id with count of 0 with preserved read barrier bits
-    stxr   w3, w2, [x4]
-    cbnz   w3, .Llock_stxr_fail       // store failed, retry
+    ldaxr  w2, [x4]                   // Acquire needed only in most common case.
+    eor    w3, w2, w1                 // Prepare the value to store if unlocked
+                                      //   (thread id, count of 0 and preserved read barrier bits),
+                                      // or prepare to compare thread id for recursive lock check
+                                      //   (lock_word.ThreadId() ^ self->ThreadId()).
+    tst    w2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // Test the non-gc bits.
+    b.ne   .Lnot_unlocked             // If not unlocked, check thin-lock ownership.
+    // unlocked case - store w3: original lock word plus thread id, preserved read barrier bits.
+    stxr   w2, w3, [x4]
+    cbnz   w2, .Lretry_lock           // If the store failed, retry.
     ret
-.Lnot_unlocked:  // x1: original lock word
-    lsr    w3, w1, LOCK_WORD_STATE_SHIFT
-    cbnz   w3, .Lslow_lock            // if either of the top two bits are set, go slow path
-    eor    w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
-    uxth   w2, w2                     // zero top 16 bits
-    cbnz   w2, .Lslow_lock            // lock word and self thread id's match -> recursive lock
-                                      // else contention, go to slow path
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits.
-    add    w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count in lock word placing in w2 to check overflow
-    lsr    w3, w2, #LOCK_WORD_GC_STATE_SHIFT     // if the first gc state bit is set, we overflowed.
-    cbnz   w3, .Lslow_lock            // if we overflow the count go slow path
-    add    w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count for real
-    stxr   w3, w2, [x4]
-    cbnz   w3, .Llock_stxr_fail       // store failed, retry
+.Lnot_unlocked:  // w2: original lock word, w1: thread id, w3: w2 ^ w1
+                                      // Check lock word state and thread id together.
+    tst    w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+    b.ne   art_quick_lock_object_no_inline
+    add    w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // Increment the recursive lock count.
+    tst    w3, #LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED  // Test the new thin lock count.
+    b.eq   art_quick_lock_object_no_inline  // A zero new count indicates overflow; go slow path.
+    stxr   w2, w3, [x4]
+    cbnz   w2, .Lretry_lock           // If the store failed, retry.
     ret
-.Llock_stxr_fail:
-    b      .Lretry_lock               // retry
-.Lslow_lock:
-    SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case we block
-    mov    x1, xSELF                  // pass Thread::Current
-    bl     artLockObjectFromCode      // (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_W0_IS_ZERO_OR_DELIVER
 END art_quick_lock_object
 
 ENTRY art_quick_lock_object_no_inline
+    // This is also the slow path for art_quick_lock_object.
     SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case we block
     mov    x1, xSELF                  // pass Thread::Current
     bl     artLockObjectFromCode      // (Object* obj, Thread*)
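The corresponding locking fast path, sketched with the same hypothetical
LOCK_WORD_* stand-ins as the unlock sketch earlier; note the unlocked test is on
the lock word itself (tst w2), while the ownership test is on the XOR (tst w3):

    constexpr uint32_t kThinLockCountMaskShifted = 0x0fff0000u;  // hypothetical

    uint32_t SlowPathLock() { return 0; }  // placeholder for artLockObjectFromCode

    uint32_t ThinLock(uint32_t lock_word, uint32_t thread_id) {
      uint32_t x = lock_word ^ thread_id;              // eor w3, w2, w1
      if ((lock_word & kGcStateMaskShiftedToggled) == 0) {
        return x;                 // unlocked: thread id plus preserved GC bits
      }
      if ((x & (kStateMaskShifted | kThinLockOwnerMaskShifted)) != 0) {
        return SlowPathLock();    // fat lock or owned by another thread
      }
      uint32_t incremented = lock_word + kThinLockCountOne;
      if ((incremented & kThinLockCountMaskShifted) == 0) {
        return SlowPathLock();    // count wrapped to zero: overflow, go slow
      }
      return incremented;         // recursive lock: store the new count
    }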
@@ -1206,54 +1197,46 @@
      */
     .extern artUnlockObjectFromCode
 ENTRY art_quick_unlock_object
-    cbz    x0, .Lslow_unlock
-    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
+    ldr    w1, [xSELF, #THREAD_ID_OFFSET]
+    cbz    x0, art_quick_unlock_object_no_inline
+                                      // Exclusive load/store has no immediate anymore.
+    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
 .Lretry_unlock:
 #ifndef USE_READ_BARRIER
-    ldr    w1, [x4]
+    ldr    w2, [x4]
 #else
-    ldxr   w1, [x4]                   // Need to use atomic instructions for read barrier
+    ldxr   w2, [x4]                   // Need to use atomic instructions for read barrier.
 #endif
-    lsr    w2, w1, LOCK_WORD_STATE_SHIFT
-    cbnz   w2, .Lslow_unlock          // if either of the top two bits are set, go slow path
-    ldr    w2, [xSELF, #THREAD_ID_OFFSET]
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
-    eor    w3, w3, w2                 // lock_word.ThreadId() ^ self->ThreadId()
-    uxth   w3, w3                     // zero top 16 bits
-    cbnz   w3, .Lslow_unlock          // do lock word and self thread id's match?
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
-    cmp    w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
-    bpl    .Lrecursive_thin_unlock
-    // transition to unlocked
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED  // w3: zero except for the preserved read barrier bits
+    eor    w3, w2, w1                 // Prepare the value to store if simply locked
+                                      //   (mostly 0s, and preserved read barrier bits),
+                                      // or prepare to compare thread id for recursive lock check
+                                      //   (lock_word.ThreadId() ^ self->ThreadId()).
+    tst    w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // Test the non-gc bits.
+    b.ne   .Lnot_simply_locked        // Locked recursively or by another thread?
+    // Transition to unlocked.
 #ifndef USE_READ_BARRIER
     stlr   w3, [x4]
 #else
-    stlxr  w2, w3, [x4]               // Need to use atomic instructions for read barrier
-    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
+    stlxr  w2, w3, [x4]               // Need to use atomic instructions for read barrier.
+    cbnz   w2, .Lretry_unlock         // If the store failed, retry.
 #endif
     ret
-.Lrecursive_thin_unlock:  // w1: original lock word
-    sub    w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
+.Lnot_simply_locked:
+                                      // Check lock word state and thread id together.
+    tst    w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+    b.ne   art_quick_unlock_object_no_inline
+    sub    w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // Decrement the recursive lock count.
 #ifndef USE_READ_BARRIER
-    str    w1, [x4]
+    str    w3, [x4]
 #else
-    stxr   w2, w1, [x4]               // Need to use atomic instructions for read barrier
-    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
+    stxr   w2, w3, [x4]               // Need to use atomic instructions for read barrier.
+    cbnz   w2, .Lretry_unlock         // If the store failed, retry.
 #endif
     ret
-.Lunlock_stxr_fail:
-    b      .Lretry_unlock             // retry
-.Lslow_unlock:
-    SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case exception allocation triggers GC
-    mov    x1, xSELF                  // pass Thread::Current
-    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_W0_IS_ZERO_OR_DELIVER
 END art_quick_unlock_object
 
 ENTRY art_quick_unlock_object_no_inline
+    // This is also the slow path for art_quick_unlock_object.
     SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case exception allocation triggers GC
     mov    x1, xSELF                  // pass Thread::Current
     bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/callee_save_frame_mips.h
similarity index 66%
rename from runtime/arch/mips/quick_method_frame_info_mips.h
rename to runtime/arch/mips/callee_save_frame_mips.h
index 8c86252..6e88d08 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/callee_save_frame_mips.h
@@ -14,13 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#ifndef ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
+#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_mips.h"
 
@@ -80,37 +81,56 @@
     (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
     (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
 
-constexpr uint32_t MipsCalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
-}
+class MipsCalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
+  }
 
-constexpr uint32_t MipsCalleeSaveFPSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
-}
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
+  }
 
-constexpr uint32_t MipsCalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(MipsCalleeSaveFPSpills(type))   /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
-}
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type))   /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
+  }
 
-constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(MipsCalleeSaveFrameSize(type),
-                              MipsCalleeSaveCoreSpills(type),
-                              MipsCalleeSaveFPSpills(type));
-}
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMipsPointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMipsPointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kMipsPointerSize);
+  }
+};
 
 }  // namespace mips
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#endif  // ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index d5a9b15..7c8ac28 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -17,13 +17,13 @@
 #include <sys/ucontext.h>
 #include "fault_handler.h"
 
+#include "arch/mips/callee_save_frame_mips.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
 #include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
-#include "quick_method_frame_info_mips.h"
 #include "registers_mips.h"
 #include "thread-current-inl.h"
 
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/callee_save_frame_mips64.h
similarity index 61%
rename from runtime/arch/mips64/quick_method_frame_info_mips64.h
rename to runtime/arch/mips64/callee_save_frame_mips64.h
index 520f631..59529a0 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/callee_save_frame_mips64.h
@@ -14,13 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#ifndef ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
+#define ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
+#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_mips64.h"
 
@@ -71,37 +72,56 @@
     (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
     (1 << art::mips64::F30) | (1 << art::mips64::F31);
 
-constexpr uint32_t Mips64CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
-}
+class Mips64CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
+  }
 
-constexpr uint32_t Mips64CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMips64CalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
-}
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMips64CalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
+  }
 
-constexpr uint32_t Mips64CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(Mips64CalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(Mips64CalleeSaveFpSpills(type))   /* fprs */ +
-                  + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
-}
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type))   /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
+  }
 
-constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(Mips64CalleeSaveFrameSize(type),
-                              Mips64CalleeSaveCoreSpills(type),
-                              Mips64CalleeSaveFpSpills(type));
-}
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMips64PointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMips64PointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kMips64PointerSize);
+  }
+};
 
 }  // namespace mips64
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#endif  // ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 695da47..85f3528 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -18,13 +18,13 @@
 
 #include <sys/ucontext.h>
 
+#include "arch/mips64/callee_save_frame_mips64.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
 #include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
-#include "quick_method_frame_info_mips64.h"
 #include "registers_mips64.h"
 #include "thread-current-inl.h"
 
diff --git a/runtime/arch/x86/callee_save_frame_x86.h b/runtime/arch/x86/callee_save_frame_x86.h
new file mode 100644
index 0000000..f336f43
--- /dev/null
+++ b/runtime/arch/x86/callee_save_frame_x86.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
+#define ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86.h"
+
+namespace art {
+namespace x86 {
+
+static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
+    (1 << art::x86::kNumberOfCpuRegisters);  // Fake return address callee save.
+static constexpr uint32_t kX86CalleeSaveRefSpills =
+    (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
+static constexpr uint32_t kX86CalleeSaveArgSpills =
+    (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+static constexpr uint32_t kX86CalleeSaveEverythingSpills =
+    (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
+static constexpr uint32_t kX86CalleeSaveFpArgSpills =
+    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+    (1 << art::x86::XMM2) | (1 << art::x86::XMM3);
+static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
+    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+    (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
+    (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
+    (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
+
+class X86CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+           (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    2 * POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
+  }
+
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            2 * POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kX86PointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kX86PointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kX86PointerSize);
+  }
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
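One x86-specific wrinkle worth noting: GetFrameSize and GetFpr1Offset weight the
FP spills by two because each XMM save takes 8 bytes while kX86PointerSize is 4,
so every FP register occupies two pointer-size slots. A trivial check under those
assumptions:

    #include <cstddef>

    constexpr size_t kXmmSpillBytes   = 8;  // assumption: full 8-byte XMM saves
    constexpr size_t kX86PointerBytes = 4;  // 32-bit pointers

    static_assert(kXmmSpillBytes == 2 * kX86PointerBytes,
                  "each XMM spill spans two pointer-size slots, hence the "
                  "2 * POPCOUNT(GetFpSpills(type)) term above");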
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 8ab4ce1..b89d45f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1292,7 +1292,7 @@
     jz   .Lslow_lock
 .Lretry_lock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx  // ecx := lock word
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx         // test the 2 high bits.
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx  // test the 2 high bits.
     jne  .Lslow_lock                      // slow path if either of the two high bits are set.
     movl %ecx, %edx                       // save lock word (edx) to keep read barrier bits.
     andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx  // zero the gc bits.
@@ -1362,7 +1362,7 @@
 .Lretry_unlock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx  // ecx := lock word
     movl %fs:THREAD_ID_OFFSET, %edx       // edx := thread id
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
     jnz  .Lslow_unlock                    // lock word contains a monitor
     cmpw %cx, %dx                         // does the thread id match?
     jne  .Lslow_unlock
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
deleted file mode 100644
index 9a66333..0000000
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
-#define ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_x86.h"
-
-namespace art {
-namespace x86 {
-
-enum XMM {
-  XMM0 = 0,
-  XMM1 = 1,
-  XMM2 = 2,
-  XMM3 = 3,
-  XMM4 = 4,
-  XMM5 = 5,
-  XMM6 = 6,
-  XMM7 = 7,
-};
-
-static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
-    (1 << art::x86::kNumberOfCpuRegisters);  // Fake return address callee save.
-static constexpr uint32_t kX86CalleeSaveRefSpills =
-    (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
-static constexpr uint32_t kX86CalleeSaveArgSpills =
-    (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-static constexpr uint32_t kX86CalleeSaveEverythingSpills =
-    (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-
-static constexpr uint32_t kX86CalleeSaveFpArgSpills =
-    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
-    (1 << art::x86::XMM2) | (1 << art::x86::XMM3);
-static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
-    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
-    (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
-    (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
-    (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
-
-constexpr uint32_t X86CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t X86CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
-         (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t X86CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
-                  2 * POPCOUNT(X86CalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(X86CalleeSaveFrameSize(type),
-                              X86CalleeSaveCoreSpills(type),
-                              X86CalleeSaveFpSpills(type));
-}
-
-}  // namespace x86
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
diff --git a/runtime/arch/x86/registers_x86.h b/runtime/arch/x86/registers_x86.h
index 5a5d226..d3b959f 100644
--- a/runtime/arch/x86/registers_x86.h
+++ b/runtime/arch/x86/registers_x86.h
@@ -42,6 +42,20 @@
 };
 std::ostream& operator<<(std::ostream& os, const Register& rhs);
 
+enum XmmRegister {
+  XMM0 = 0,
+  XMM1 = 1,
+  XMM2 = 2,
+  XMM3 = 3,
+  XMM4 = 4,
+  XMM5 = 5,
+  XMM6 = 6,
+  XMM7 = 7,
+  kNumberOfXmmRegisters = 8,
+  kNoXmmRegister = -1  // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
+
 }  // namespace x86
 }  // namespace art
 
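The definition of this stream operator lives in registers_x86.cc, which is
not part of this excerpt; a typical implementation would follow the existing
Register printer, roughly along these lines (illustrative sketch only, not
the actual definition):

    std::ostream& operator<<(std::ostream& os, const XmmRegister& rhs) {
      if (rhs >= XMM0 && rhs < kNumberOfXmmRegisters) {
        os << "XMM" << static_cast<int>(rhs);
      } else {
        os << "XmmRegister[" << static_cast<int>(rhs) << "]";
      }
      return os;
    }
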
diff --git a/runtime/arch/x86_64/callee_save_frame_x86_64.h b/runtime/arch/x86_64/callee_save_frame_x86_64.h
new file mode 100644
index 0000000..228a902
--- /dev/null
+++ b/runtime/arch/x86_64/callee_save_frame_x86_64.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86_64.h"
+
+namespace art {
+namespace x86_64 {
+
+static constexpr uint32_t kX86_64CalleeSaveAlwaysSpills =
+    (1 << art::x86_64::kNumberOfCpuRegisters);  // Fake return address callee save.
+static constexpr uint32_t kX86_64CalleeSaveRefSpills =
+    (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
+    (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
+static constexpr uint32_t kX86_64CalleeSaveArgSpills =
+    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
+    (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
+static constexpr uint32_t kX86_64CalleeSaveEverythingSpills =
+    (1 << art::x86_64::RAX) | (1 << art::x86_64::RCX) | (1 << art::x86_64::RDX) |
+    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDI) | (1 << art::x86_64::R8) |
+    (1 << art::x86_64::R9) | (1 << art::x86_64::R10) | (1 << art::x86_64::R11);
+
+static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
+    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
+    (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
+static constexpr uint32_t kX86_64CalleeSaveFpSpills =
+    (1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
+    (1 << art::x86_64::XMM14) | (1 << art::x86_64::XMM15);
+static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
+    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) |
+    (1 << art::x86_64::XMM2) | (1 << art::x86_64::XMM3) |
+    (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7) |
+    (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
+    (1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
+
+class X86_64CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kX86_64CalleeSaveFpSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
+  }
+
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kX86_64PointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kX86_64PointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kX86_64PointerSize);
+  }
+};
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
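
The same cross-check for x86-64 (again a sketch assuming namespace art): 12
core spills (the fake return address + the RBX/RBP/R12-R15 refs + the
RSI/RDX/RCX/R8/R9 args) and 12 FP spills (XMM12-XMM15 plus the XMM0-XMM7
args) at one 8-byte slot each, matching the constants removed from
quick_trampoline_entrypoints.cc below:

    static_assert(x86_64::X86_64CalleeSaveFrame::GetFrameSize(
                      CalleeSaveType::kSaveRefsAndArgs) == 208,
                  "RoundUp((12 + 12 + 1) * 8, /* kStackAlignment */ 16)");
    static_assert(x86_64::X86_64CalleeSaveFrame::GetFpr1Offset(
                      CalleeSaveType::kSaveRefsAndArgs) == 16, "208 - (12 + 12) * 8");
    static_assert(x86_64::X86_64CalleeSaveFrame::GetGpr1Offset(
                      CalleeSaveType::kSaveRefsAndArgs) == 112, "208 - 12 * 8");
    static_assert(x86_64::X86_64CalleeSaveFrame::GetReturnPcOffset(
                      CalleeSaveType::kSaveRefsAndArgs) == 200, "208 - 8");
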
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index eb945ed..c179033 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1312,7 +1312,7 @@
     jz   .Lslow_lock
 .Lretry_lock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx  // ecx := lock word.
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx         // Test the 2 high bits.
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx  // Test the 2 high bits.
     jne  .Lslow_lock                      // Slow path if either of the two high bits is set.
     movl %ecx, %edx                       // save lock word (edx) to keep read barrier bits.
     andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx  // zero the gc bits.
@@ -1362,7 +1362,7 @@
 .Lretry_unlock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx  // ecx := lock word
     movl %gs:THREAD_ID_OFFSET, %edx       // edx := thread id
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
     jnz  .Lslow_unlock                    // lock word contains a monitor
     cmpw %cx, %dx                         // does the thread id match?
     jne  .Lslow_unlock
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
deleted file mode 100644
index ebf976e..0000000
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
-#define ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_x86_64.h"
-
-namespace art {
-namespace x86_64 {
-
-static constexpr uint32_t kX86_64CalleeSaveAlwaysSpills =
-    (1 << art::x86_64::kNumberOfCpuRegisters);  // Fake return address callee save.
-static constexpr uint32_t kX86_64CalleeSaveRefSpills =
-    (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
-    (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
-static constexpr uint32_t kX86_64CalleeSaveArgSpills =
-    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
-    (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
-static constexpr uint32_t kX86_64CalleeSaveEverythingSpills =
-    (1 << art::x86_64::RAX) | (1 << art::x86_64::RCX) | (1 << art::x86_64::RDX) |
-    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDI) | (1 << art::x86_64::R8) |
-    (1 << art::x86_64::R9) | (1 << art::x86_64::R10) | (1 << art::x86_64::R11);
-
-static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
-    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
-    (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
-    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
-static constexpr uint32_t kX86_64CalleeSaveFpSpills =
-    (1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
-    (1 << art::x86_64::XMM14) | (1 << art::x86_64::XMM15);
-static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
-    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) |
-    (1 << art::x86_64::XMM2) | (1 << art::x86_64::XMM3) |
-    (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
-    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7) |
-    (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
-    (1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
-
-constexpr uint32_t X86_64CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t X86_64CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kX86_64CalleeSaveFpSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t X86_64CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
-                              X86_64CalleeSaveCoreSpills(type),
-                              X86_64CalleeSaveFpSpills(type));
-}
-
-}  // namespace x86_64
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 9ac7886..702f0e4 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -125,7 +125,7 @@
 }
 
 void MemMapArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+  if (kRunningOnMemoryTool) {
     for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
       MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
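
With Valgrind support removed, whether a memory tool is in use is a
build-time fact (ASan) rather than a runtime discovery, so the branch can be
a plain constexpr condition that regular builds eliminate entirely. The
constant is assumed to be defined roughly as follows (the real definition is
in ART's memory-tool header, not shown in this patch):

    #ifdef ADDRESS_SANITIZER
    static constexpr bool kRunningOnMemoryTool = true;
    #else
    static constexpr bool kRunningOnMemoryTool = false;
    #endif
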
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index e2ad7fd..6917899 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -64,20 +64,19 @@
   void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
-    CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+    CodeInfo code_info(GetCurrentOatQuickMethodHeader());
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
     CodeItemDataAccessor accessor(m->DexInstructionData());
     uint16_t number_of_dex_registers = accessor.RegistersSize();
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
-    uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
-    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+    uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     for (int i = 0; i < number_of_references; ++i) {
       int reg = registers[i];
       CHECK_LT(reg, accessor.RegistersSize());
       DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
-          reg, number_of_dex_registers, code_info, encoding);
+          reg, number_of_dex_registers, code_info);
       switch (location.GetKind()) {
         case DexRegisterLocation::Kind::kNone:
           // Not set, should not be a reference.
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 8fd95ed..657a78b 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -880,11 +880,14 @@
 
 void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> expected_type,
                                    ObjPtr<mirror::MethodType> actual_type) {
-  ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
-                 nullptr,
-                 StringPrintf("Expected %s but was %s",
-                              expected_type->PrettyDescriptor().c_str(),
-                              actual_type->PrettyDescriptor().c_str()).c_str());
+  ThrowWrongMethodTypeException(expected_type->PrettyDescriptor(), actual_type->PrettyDescriptor());
+}
+
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+                                   const std::string& actual_descriptor) {
+  std::ostringstream msg;
+  msg << "Expected " << expected_descriptor << " but was " << actual_descriptor;
+  ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",  nullptr, msg.str().c_str());
 }
 
 }  // namespace art
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 29a056e..6acff6f 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -274,6 +274,10 @@
                                    ObjPtr<mirror::MethodType> callsite_type)
     REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
 
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+                                   const std::string& actual_descriptor)
+    REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_COMMON_THROWS_H_
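
The new overload lets callers that only have printable descriptors raise the
exception without materializing mirror::MethodType objects first. A
hypothetical call site (the descriptors are illustrative; the caller must
hold the mutator lock, per REQUIRES_SHARED above):

    ThrowWrongMethodTypeException("(ILjava/lang/String;)V", "(I)V");
    // Pending exception message, per the format in common_throws.cc:
    //   Expected (ILjava/lang/String;)V but was (I)V
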
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index c399b1c..95b42d2 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -29,6 +29,7 @@
 #include "mirror/field.h"
 #include "mirror/method.h"
 #include "oat_file.h"
+#include "obj_ptr-inl.h"
 #include "reflection.h"
 #include "thread.h"
 #include "well_known_classes.h"
@@ -116,9 +117,9 @@
   DISALLOW_COPY_AND_ASSIGN(ClassData);
 };
 
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
-                                       Handle<mirror::Class> annotation_class,
-                                       const uint8_t** annotation)
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+                                              Handle<mirror::Class> annotation_class,
+                                              const uint8_t** annotation)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) {
@@ -333,7 +334,7 @@
   return dex_file.GetClassAnnotationSet(annotations_dir);
 }
 
-mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
+ObjPtr<mirror::Object> ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   uint32_t type_index = DecodeUnsignedLeb128(annotation);
   uint32_t size = DecodeUnsignedLeb128(annotation);
@@ -355,13 +356,13 @@
   }
 
   ObjPtr<mirror::Class> annotation_member_class =
-      soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember).Ptr();
-  mirror::Class* annotation_member_array_class =
+      soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember);
+  ObjPtr<mirror::Class> annotation_member_array_class =
       class_linker->FindArrayClass(self, &annotation_member_class);
   if (annotation_member_array_class == nullptr) {
     return nullptr;
   }
-  mirror::ObjectArray<mirror::Object>* element_array = nullptr;
+  ObjPtr<mirror::ObjectArray<mirror::Object>> element_array = nullptr;
   if (size > 0) {
     element_array =
         mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
@@ -373,7 +374,7 @@
 
   Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array));
   for (uint32_t i = 0; i < size; ++i) {
-    mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation);
+    ObjPtr<mirror::Object> new_member = CreateAnnotationMember(klass, annotation_class, annotation);
     if (new_member == nullptr) {
       return nullptr;
     }
@@ -605,7 +606,7 @@
             return false;
           }
           if (!component_type->IsPrimitive()) {
-            mirror::Object* obj = new_annotation_value.value_.GetL();
+            ObjPtr<mirror::Object> obj = new_annotation_value.value_.GetL();
             new_array->AsObjectArray<mirror::Object>()->
                 SetWithoutChecks<kTransactionActive>(i, obj);
           } else {
@@ -682,20 +683,20 @@
   *annotation_ptr = annotation;
 
   if (result_style == DexFile::kAllObjects && primitive_type != Primitive::kPrimVoid) {
-    element_object = BoxPrimitive(primitive_type, annotation_value->value_).Ptr();
+    element_object = BoxPrimitive(primitive_type, annotation_value->value_);
     set_object = true;
   }
 
   if (set_object) {
-    annotation_value->value_.SetL(element_object.Ptr());
+    annotation_value->value_.SetL(element_object);
   }
 
   return true;
 }
 
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
-                                       Handle<mirror::Class> annotation_class,
-                                       const uint8_t** annotation) {
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+                                              Handle<mirror::Class> annotation_class,
+                                              const uint8_t** annotation) {
   const DexFile& dex_file = klass.GetDexFile();
   Thread* self = Thread::Current();
   ScopedObjectAccessUnchecked soa(self);
@@ -799,7 +800,7 @@
   return nullptr;
 }
 
-mirror::Object* GetAnnotationObjectFromAnnotationSet(
+ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(
     const ClassData& klass,
     const DexFile::AnnotationSetItem* annotation_set,
     uint32_t visibility,
@@ -814,11 +815,11 @@
   return ProcessEncodedAnnotation(klass, &annotation);
 }
 
-mirror::Object* GetAnnotationValue(const ClassData& klass,
-                                   const DexFile::AnnotationItem* annotation_item,
-                                   const char* annotation_name,
-                                   Handle<mirror::Class> array_class,
-                                   uint32_t expected_type)
+ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
+                                          const DexFile::AnnotationItem* annotation_item,
+                                          const char* annotation_name,
+                                          Handle<mirror::Class> array_class,
+                                          uint32_t expected_type)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
   const uint8_t* annotation =
@@ -864,7 +865,7 @@
   if (string_array_class == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj =
+  ObjPtr<mirror::Object> obj =
       GetAnnotationValue(klass, annotation_item, "value", string_array_class,
                          DexFile::kDexAnnotationArray);
   if (obj == nullptr) {
@@ -873,8 +874,9 @@
   return obj->AsObjectArray<mirror::String>();
 }
 
-mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
-                                                   const DexFile::AnnotationSetItem* annotation_set)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(
+    const ClassData& klass,
+    const DexFile::AnnotationSetItem* annotation_set)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
   StackHandleScope<1> hs(Thread::Current());
@@ -890,7 +892,7 @@
   if (class_array_class == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj =
+  ObjPtr<mirror::Object> obj =
       GetAnnotationValue(klass, annotation_item, "value", class_array_class,
                          DexFile::kDexAnnotationArray);
   if (obj == nullptr) {
@@ -899,7 +901,7 @@
   return obj->AsObjectArray<mirror::Class>();
 }
 
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSet(
     const ClassData& klass,
     const DexFile::AnnotationSetItem* annotation_set,
     uint32_t visibility)
@@ -930,7 +932,7 @@
       continue;
     }
     const uint8_t* annotation = annotation_item->annotation_;
-    mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
+    ObjPtr<mirror::Object> annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
     if (annotation_obj != nullptr) {
       result->SetWithoutChecks<false>(dest_index, annotation_obj);
       ++dest_index;
@@ -943,21 +945,21 @@
     return result.Get();
   }
 
-  mirror::ObjectArray<mirror::Object>* trimmed_result =
+  ObjPtr<mirror::ObjectArray<mirror::Object>> trimmed_result =
       mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
   if (trimmed_result == nullptr) {
     return nullptr;
   }
 
   for (uint32_t i = 0; i < dest_index; ++i) {
-    mirror::Object* obj = result->GetWithoutChecks(i);
+    ObjPtr<mirror::Object> obj = result->GetWithoutChecks(i);
     trimmed_result->SetWithoutChecks<false>(i, obj);
   }
 
   return trimmed_result;
 }
 
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSetRefList(
     const ClassData& klass,
     const DexFile::AnnotationSetRefList* set_ref_list,
     uint32_t size)
@@ -968,7 +970,7 @@
   StackHandleScope<1> hs(self);
   ObjPtr<mirror::Class> annotation_array_class =
       soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
-  mirror::Class* annotation_array_array_class =
+  ObjPtr<mirror::Class> annotation_array_array_class =
       Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
   if (annotation_array_array_class == nullptr) {
     return nullptr;
@@ -982,8 +984,9 @@
   for (uint32_t index = 0; index < size; ++index) {
     const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
     const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
-    mirror::Object* annotation_set = ProcessAnnotationSet(klass, set_item,
-                                                          DexFile::kDexVisibilityRuntime);
+    ObjPtr<mirror::Object> annotation_set = ProcessAnnotationSet(klass,
+                                                                 set_item,
+                                                                 DexFile::kDexVisibilityRuntime);
     if (annotation_set == nullptr) {
       return nullptr;
     }
@@ -995,7 +998,8 @@
 
 namespace annotations {
 
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+                                             Handle<mirror::Class> annotation_class) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1008,7 +1012,7 @@
                                               annotation_class);
 }
 
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   StackHandleScope<1> hs(Thread::Current());
   const ClassData field_class(hs, field);
@@ -1037,7 +1041,7 @@
   return annotation_item != nullptr;
 }
 
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method) {
   const ClassData klass(method);
   const DexFile* dex_file = &klass.GetDexFile();
   const DexFile::AnnotationsDirectoryItem* annotations_dir =
@@ -1081,7 +1085,8 @@
   return annotation_value.value_.GetL();
 }
 
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+                                              Handle<mirror::Class> annotation_class) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1090,14 +1095,14 @@
                                               DexFile::kDexVisibilityRuntime, annotation_class);
 }
 
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   return ProcessAnnotationSet(ClassData(method),
                               annotation_set,
                               DexFile::kDexVisibilityRuntime);
 }
 
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1105,7 +1110,7 @@
   return GetThrowsValue(ClassData(method), annotation_set);
 }
 
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method) {
   const DexFile* dex_file = method->GetDexFile();
   const DexFile::ParameterAnnotationsItem* parameter_annotations =
       FindAnnotationsItemForMethod(method);
@@ -1136,9 +1141,9 @@
   return set_ref_list->size_;
 }
 
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
-                                                uint32_t parameter_idx,
-                                                Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+                                                       uint32_t parameter_idx,
+                                                       Handle<mirror::Class> annotation_class) {
   const DexFile* dex_file = method->GetDexFile();
   const DexFile::ParameterAnnotationsItem* parameter_annotations =
       FindAnnotationsItemForMethod(method);
@@ -1307,8 +1312,8 @@
   return access_flags;
 }
 
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
-                                      Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
+                                             Handle<mirror::Class> annotation_class) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1320,13 +1325,13 @@
                                               annotation_class);
 }
 
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
 }
 
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1345,7 +1350,7 @@
   if (class_array_class == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj =
+  ObjPtr<mirror::Object> obj =
       GetAnnotationValue(data, annotation_item, "value", class_array_class,
                          DexFile::kDexAnnotationArray);
   if (obj == nullptr) {
@@ -1354,7 +1359,7 @@
   return obj->AsObjectArray<mirror::Class>();
 }
 
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1366,17 +1371,19 @@
   if (annotation_item == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj = GetAnnotationValue(data, annotation_item, "value",
-                                           ScopedNullHandle<mirror::Class>(),
-                                           DexFile::kDexAnnotationType);
+  ObjPtr<mirror::Object> obj = GetAnnotationValue(data,
+                                                  annotation_item,
+                                                  "value",
+                                                  ScopedNullHandle<mirror::Class>(),
+                                                  DexFile::kDexAnnotationType);
   if (obj == nullptr) {
     return nullptr;
   }
   return obj->AsClass();
 }
 
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
-  mirror::Class* declaring_class = GetDeclaringClass(klass);
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass) {
+  ObjPtr<mirror::Class> declaring_class = GetDeclaringClass(klass);
   if (declaring_class != nullptr) {
     return declaring_class;
   }
@@ -1420,7 +1427,7 @@
   return method->GetDeclaringClass();
 }
 
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1438,7 +1445,7 @@
       DexFile::kDexAnnotationMethod);
 }
 
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) {
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index 4bb0d75..9645a7f 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -22,6 +22,7 @@
 #include "handle.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object_array.h"
+#include "obj_ptr.h"
 
 namespace art {
 
@@ -35,9 +36,10 @@
 namespace annotations {
 
 // Field annotations.
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+                                             Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field)
     REQUIRES_SHARED(Locks::mutator_lock_);
 mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field)
     REQUIRES_SHARED(Locks::mutator_lock_);
@@ -45,21 +47,22 @@
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 // Method annotations.
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method)
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+                                              Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
 uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
-                                                uint32_t parameter_idx,
-                                                Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+                                                       uint32_t parameter_idx,
+                                                       Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
 bool GetParametersMetadataForMethod(ArtMethod* method,
                                     MutableHandle<mirror::ObjectArray<mirror::String>>* names,
@@ -85,20 +88,20 @@
                                               uint32_t method_index);
 
 // Class annotations.
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
                                       Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass)
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name)
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name)
     REQUIRES_SHARED(Locks::mutator_lock_);
 bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags)
     REQUIRES_SHARED(Locks::mutator_lock_);
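
The header-wide switch from raw mirror pointers to ObjPtr return types is
mechanical, but the intent is worth spelling out: ObjPtr is ART's value-type
wrapper for GC references, which debug builds can validate on use. An
illustrative toy of the shape (not ART's actual ObjPtr implementation):

    #include <cassert>
    #include <cstddef>

    template <typename T>
    class CheckedPtr {
     public:
      CheckedPtr(T* ptr = nullptr) : ptr_(ptr) {}  // Implicit by design.
      T* operator->() const {
        assert(IsValid());  // The real wrapper verifies a thread-local cookie.
        return ptr_;
      }
      bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }
      bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }
     private:
      bool IsValid() const { return ptr_ != nullptr; }
      T* ptr_;
    };
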
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index d4e7492..f6b1c73 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -47,7 +47,6 @@
 inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
                                     const MethodInfo& method_info,
                                     const InlineInfo& inline_info,
-                                    const InlineInfoEncoding& encoding,
                                     uint8_t inlining_depth)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(!outer_method->IsObsolete());
@@ -57,12 +56,12 @@
   // suspended while executing it.
   ScopedAssertNoThreadSuspension sants(__FUNCTION__);
 
-  if (inline_info.EncodesArtMethodAtDepth(encoding, inlining_depth)) {
-    return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
+  if (inline_info.EncodesArtMethodAtDepth(inlining_depth)) {
+    return inline_info.GetArtMethodAtDepth(inlining_depth);
   }
 
-  uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth);
-  if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
+  uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, inlining_depth);
+  if (inline_info.GetDexPcAtDepth(inlining_depth) == static_cast<uint32_t>(-1)) {
     // "charAt" special case. It is the only non-leaf method we inline across dex files.
     ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
     DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
@@ -73,9 +72,9 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtMethod* method = outer_method;
   for (uint32_t depth = 0, end = inlining_depth + 1u; depth != end; ++depth) {
-    DCHECK(!inline_info.EncodesArtMethodAtDepth(encoding, depth));
-    DCHECK_NE(inline_info.GetDexPcAtDepth(encoding, depth), static_cast<uint32_t>(-1));
-    method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, depth);
+    DCHECK(!inline_info.EncodesArtMethodAtDepth(depth));
+    DCHECK_NE(inline_info.GetDexPcAtDepth(depth), static_cast<uint32_t>(-1));
+    method_index = inline_info.GetMethodIndexAtDepth(method_info, depth);
     ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
                                                                    method->GetDexCache(),
                                                                    method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 7fc8db3..7f9b385 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -180,10 +180,10 @@
     ArtMethod** sp, CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
 
-  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+  const size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
   auto** caller_sp = reinterpret_cast<ArtMethod**>(
       reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
-  const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+  const size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
   uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
       (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
   ArtMethod* outer_method = *caller_sp;
@@ -201,18 +201,16 @@
       DCHECK(current_code != nullptr);
       DCHECK(current_code->IsOptimized());
       uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
-      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+      CodeInfo code_info(current_code);
       MethodInfo method_info = current_code->GetOptimizedMethodInfo();
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
-      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(stack_map.IsValid());
-      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+      if (stack_map.HasInlineInfo()) {
+        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
         caller = GetResolvedMethod(outer_method,
                                    method_info,
                                    inline_info,
-                                   encoding.inline_info.encoding,
-                                   inline_info.GetDepth(encoding.inline_info.encoding) - 1);
+                                   inline_info.GetDepth() - 1);
       }
     }
     if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index ef27ca3..6f1bbaa 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -21,16 +21,17 @@
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/mutex.h"
+#include "quick/quick_method_frame_info.h"
 #include "thread-inl.h"
 
 // Specific frame size code is in architecture-specific files. We include this to compile-time
 // specialize the code.
-#include "arch/arm/quick_method_frame_info_arm.h"
-#include "arch/arm64/quick_method_frame_info_arm64.h"
-#include "arch/mips/quick_method_frame_info_mips.h"
-#include "arch/mips64/quick_method_frame_info_mips64.h"
-#include "arch/x86/quick_method_frame_info_x86.h"
-#include "arch/x86_64/quick_method_frame_info_x86_64.h"
+#include "arch/arm/callee_save_frame_arm.h"
+#include "arch/arm64/callee_save_frame_arm64.h"
+#include "arch/mips/callee_save_frame_mips.h"
+#include "arch/mips64/callee_save_frame_mips64.h"
+#include "arch/x86/callee_save_frame_x86.h"
+#include "arch/x86_64/callee_save_frame_x86_64.h"
 
 namespace art {
 class ArtMethod;
@@ -67,57 +68,28 @@
   bool exit_check_;
 };
 
-static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, CalleeSaveType type) {
-  switch (isa) {
-    case InstructionSet::kArm:
-    case InstructionSet::kThumb2:
-      return arm::ArmCalleeSaveFrameSize(type);
-    case InstructionSet::kArm64:
-      return arm64::Arm64CalleeSaveFrameSize(type);
-    case InstructionSet::kMips:
-      return mips::MipsCalleeSaveFrameSize(type);
-    case InstructionSet::kMips64:
-      return mips64::Mips64CalleeSaveFrameSize(type);
-    case InstructionSet::kX86:
-      return x86::X86CalleeSaveFrameSize(type);
-    case InstructionSet::kX86_64:
-      return x86_64::X86_64CalleeSaveFrameSize(type);
-    case InstructionSet::kNone:
-      LOG(FATAL) << "kNone has no frame size";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Unknown ISA " << isa;
-  UNREACHABLE();
-}
+namespace detail_ {
 
-// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
-  switch (isa) {
-    case InstructionSet::kArm:
-    case InstructionSet::kThumb2:
-      return kArmPointerSize;
-    case InstructionSet::kArm64:
-      return kArm64PointerSize;
-    case InstructionSet::kMips:
-      return kMipsPointerSize;
-    case InstructionSet::kMips64:
-      return kMips64PointerSize;
-    case InstructionSet::kX86:
-      return kX86PointerSize;
-    case InstructionSet::kX86_64:
-      return kX86_64PointerSize;
-    case InstructionSet::kNone:
-      LOG(FATAL) << "kNone has no pointer size";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Unknown ISA " << isa;
-  UNREACHABLE();
-}
+template <InstructionSet>
+struct CSFSelector;  // No definition for unspecialized callee save frame selector.
 
-// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa, CalleeSaveType type) {
-  return GetCalleeSaveFrameSize(isa, type) - static_cast<size_t>(GetConstExprPointerSize(isa));
-}
+// Note: kThumb2 is never the kRuntimeISA.
+template <>
+struct CSFSelector<InstructionSet::kArm> { using type = arm::ArmCalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kArm64> { using type = arm64::Arm64CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kMips> { using type = mips::MipsCalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kMips64> { using type = mips64::Mips64CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kX86> { using type = x86::X86CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64CalleeSaveFrame; };
+
+}  // namespace detail_
+
+using RuntimeCalleeSaveFrame = detail_::CSFSelector<kRuntimeISA>::type;
 
 }  // namespace art
 
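The selector replaces the constexpr switches over the ISA (which needed
LOG(FATAL) fall-through arms for kNone and unknown values) with a
compile-time type lookup: a kRuntimeISA without a specialization now fails
to build instead of becoming a runtime error path, and every frame query is
a constant expression. A minimal usage sketch (given the header above):

    constexpr size_t frame_size =
        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
    constexpr size_t return_pc_offset =
        RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
    static_assert(return_pc_offset < frame_size,
                  "The return PC spill slot lies inside the frame.");
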
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 0a186f4..ff85f47 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -26,6 +26,7 @@
 #include "dex/dex_instruction-inl.h"
 #include "dex/method_reference.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "imt_conflict_table.h"
@@ -61,7 +62,16 @@
   static constexpr size_t kBytesStackArgLocation = 4;
   // Frame size in bytes of a callee-save frame for RefsAndArgs.
   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
-      GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs);
+      RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+  // Offset of first GPR arg.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+      RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+  // Offset of first FPR arg.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+      RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+  // Offset of return address.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
 #if defined(__arm__)
   // The callee save frame is pointed to by SP.
   // | argN       |  |
@@ -89,12 +99,6 @@
   static constexpr size_t kNumQuickGprArgs = 3;
   static constexpr size_t kNumQuickFprArgs = 16;
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
-      arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
-      arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
-      arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -127,15 +131,6 @@
   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
-  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
-      arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
-  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
-      arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
-  // Offset of return address.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
-      arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -179,9 +174,6 @@
                                                   // passed only in even numbered registers and each
                                                   // double occupies two registers.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -223,9 +215,6 @@
   static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = true;
 
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F13).
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -256,9 +245,6 @@
   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -298,9 +284,6 @@
   static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     switch (gpr_index) {
       case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
@@ -347,8 +330,8 @@
 
   static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
-    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
-                                                            CalleeSaveType::kSaveRefsAndArgs);
+    constexpr size_t callee_frame_size =
+        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -356,16 +339,14 @@
     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
 
     if (current_code->IsOptimized()) {
-      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
-      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
+      CodeInfo code_info(current_code);
+      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
       DCHECK(stack_map.IsValid());
-      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-        return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
-                                           inline_info.GetDepth(encoding.inline_info.encoding)-1);
+      if (stack_map.HasInlineInfo()) {
+        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+        return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
       } else {
-        return stack_map.GetDexPc(encoding.stack_map.encoding);
+        return stack_map.GetDexPc();
       }
     } else {
       return current_code->ToDexPc(*caller_sp, outer_pc);
@@ -375,8 +356,8 @@
   static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
-    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
-                                                            CalleeSaveType::kSaveRefsAndArgs);
+    constexpr size_t callee_frame_size =
+        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -385,13 +366,12 @@
       return false;
     }
     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
-    CodeInfo code_info = current_code->GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    CodeInfo code_info(current_code);
     MethodInfo method_info = current_code->GetOptimizedMethodInfo();
-    InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
+    InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset));
     if (invoke.IsValid()) {
-      *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
-      *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
+      *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType());
+      *dex_method_index = invoke.GetMethodIndex(method_info);
       return true;
     }
     return false;
@@ -400,8 +380,9 @@
   // For the given quick ref and args quick frame, return the caller's PC.
   static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
-    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
-    return *reinterpret_cast<uintptr_t*>(lr);
+    uint8_t* return_address_spill =
+        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
+    return *reinterpret_cast<uintptr_t*>(return_address_spill);
   }
 
   QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
@@ -1159,8 +1140,8 @@
   CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
                                      << self->GetException()->Dump();
   // Compute address of return PC and sanity check that it currently holds 0.
-  size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA,
-                                                        CalleeSaveType::kSaveEverything);
+  constexpr size_t return_pc_offset =
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
   uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
                                                       return_pc_offset);
   CHECK_EQ(*return_pc, 0U);
@@ -1212,10 +1193,10 @@
   constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
   CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
 
-  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+  constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
   auto** caller_sp = reinterpret_cast<ArtMethod**>(
       reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
-  const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+  constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
   uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
       (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
   ArtMethod* outer_method = *caller_sp;
@@ -1230,12 +1211,11 @@
   CHECK(current_code != nullptr);
   CHECK(current_code->IsOptimized());
   uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
-  CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+  CodeInfo code_info(current_code);
   MethodInfo method_info = current_code->GetOptimizedMethodInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   CHECK(stack_map.IsValid());
-  uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
+  uint32_t dex_pc = stack_map.GetDexPc();
 
   // Log the outer method and its associated dex file and class table pointer which can be used
   // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
@@ -1249,20 +1229,17 @@
   LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(outer_method, dex_pc);
 
   ArtMethod* caller = outer_method;
-  if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    const InlineInfoEncoding& inline_info_encoding = encoding.inline_info.encoding;
-    size_t depth = inline_info.GetDepth(inline_info_encoding);
+  if (stack_map.HasInlineInfo()) {
+    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+    size_t depth = inline_info.GetDepth();
     for (size_t d = 0; d < depth; ++d) {
       const char* tag = "";
-      dex_pc = inline_info.GetDexPcAtDepth(inline_info_encoding, d);
-      if (inline_info.EncodesArtMethodAtDepth(inline_info_encoding, d)) {
+      dex_pc = inline_info.GetDexPcAtDepth(d);
+      if (inline_info.EncodesArtMethodAtDepth(d)) {
         tag = "encoded ";
-        caller = inline_info.GetArtMethodAtDepth(inline_info_encoding, d);
+        caller = inline_info.GetArtMethodAtDepth(d);
       } else {
-        uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info_encoding,
-                                                                  method_info,
-                                                                  d);
+        uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, d);
         if (dex_pc == static_cast<uint32_t>(-1)) {
           tag = "special ";
           CHECK_EQ(d + 1u, depth);
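
Note: the recurring change in the hunks above is mechanical. Every CodeInfo
accessor drops its CodeInfoEncoding parameter because the encoding is now
decoded once, inside the CodeInfo constructor. A minimal sketch of the new
call shape, using only names visible in this patch (a sketch against ART
internals, not a standalone program):

    uint32_t GetCallerDexPc(const OatQuickMethodHeader* header, uintptr_t pc_offset) {
      CodeInfo code_info(header);  // Encoding is decoded here, once.
      StackMap stack_map = code_info.GetStackMapForNativePcOffset(pc_offset);
      DCHECK(stack_map.IsValid());
      if (stack_map.HasInlineInfo()) {
        // Report the innermost inlined frame.
        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
        return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
      }
      return stack_map.GetDexPc();
    }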
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 77b3132..89694e3 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -54,15 +54,6 @@
     return save_method;
   }
 
-  static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
-      NO_THREAD_SAFETY_ANALYSIS {
-    ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
-    QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
-    EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
-        << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
-        << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
-  }
-
   static void CheckPCOffset(InstructionSet isa, CalleeSaveType type, size_t pc_offset)
       NO_THREAD_SAFETY_ANALYSIS {
     ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
@@ -74,79 +65,36 @@
   }
 };
 
-// Note: these tests are all runtime tests. They let the Runtime create the corresponding ArtMethod
-// and check against it. Technically we know and expect certain values, but the Runtime code is
-// not constexpr, so we cannot make this compile-time checks (and I want the Runtime code tested).
-
-// This test ensures that kQuickCalleeSaveFrame_RefAndArgs_FrameSize is correct.
-TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
-  // We have to use a define here as the callee_save_frame.h functions are constexpr.
-#define CHECK_FRAME_SIZE(isa)                                                        \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveRefsAndArgs,                                   \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsAndArgs));     \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveRefsOnly,                                      \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsOnly));        \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveAllCalleeSaves,                                \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveAllCalleeSaves));  \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveEverything,                                    \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveEverything));      \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveEverythingForClinit,                           \
-                 GetCalleeSaveFrameSize(isa,                                         \
-                                        CalleeSaveType::kSaveEverythingForClinit));  \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveEverythingForSuspendCheck,                     \
-                 GetCalleeSaveFrameSize(                                             \
-                     isa, CalleeSaveType::kSaveEverythingForSuspendCheck))
-
-  CHECK_FRAME_SIZE(InstructionSet::kArm);
-  CHECK_FRAME_SIZE(InstructionSet::kArm64);
-  CHECK_FRAME_SIZE(InstructionSet::kMips);
-  CHECK_FRAME_SIZE(InstructionSet::kMips64);
-  CHECK_FRAME_SIZE(InstructionSet::kX86);
-  CHECK_FRAME_SIZE(InstructionSet::kX86_64);
-}
-
-// This test ensures that GetConstExprPointerSize is correct with respect to
-// GetInstructionSetPointerSize.
-TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm),
-            GetConstExprPointerSize(InstructionSet::kArm));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm64),
-            GetConstExprPointerSize(InstructionSet::kArm64));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips),
-            GetConstExprPointerSize(InstructionSet::kMips));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips64),
-            GetConstExprPointerSize(InstructionSet::kMips64));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86),
-            GetConstExprPointerSize(InstructionSet::kX86));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86_64),
-            GetConstExprPointerSize(InstructionSet::kX86_64));
-}
-
 // This test ensures that the constexpr specialization of the return PC offset computation in
 // GetCalleeSavePCOffset is correct.
 TEST_F(QuickTrampolineEntrypointsTest, ReturnPC) {
   // Ensure that the computation in callee_save_frame.h is correct.
   // Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
   // sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverything,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveEverything));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForClinit,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForClinit));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForSuspendCheck,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA,
-                                            CalleeSaveType::kSaveEverythingForSuspendCheck));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveRefsAndArgs,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveRefsOnly,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsOnly));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveAllCalleeSaves,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveAllCalleeSaves));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveEverything,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveEverythingForClinit,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverythingForClinit));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveEverythingForSuspendCheck,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverythingForSuspendCheck));
 }
 
 }  // namespace art
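
Note: the deleted FrameSize and PointerSize tests are redundant once the frame
layout is computed at compile time; equivalent checks can live in a
static_assert. A standalone model of the idea (the enum, table values, and
return-PC-at-top-of-frame layout below are illustrative assumptions, not ART's
real numbers):

    #include <cstddef>

    enum class CalleeSaveType : size_t { kSaveAllCalleeSaves, kSaveRefsOnly, kSaveRefsAndArgs };

    constexpr size_t kFrameSizes[] = { 176u, 96u, 224u };  // Illustrative sizes only.

    constexpr size_t GetFrameSize(CalleeSaveType type) {
      return kFrameSizes[static_cast<size_t>(type)];
    }

    // Sketch assumption: the return PC is spilled in the top slot of the frame.
    constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
      return GetFrameSize(type) - sizeof(void*);
    }

    // A mismatch now fails the build instead of a runtime gtest.
    static_assert(GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs) == 224u - sizeof(void*),
                  "return PC sits in the frame's top slot");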
diff --git a/runtime/exec_utils_test.cc b/runtime/exec_utils_test.cc
index 68edfa8..a9c1ea2 100644
--- a/runtime/exec_utils_test.cc
+++ b/runtime/exec_utils_test.cc
@@ -36,8 +36,10 @@
     command.push_back("/usr/bin/id");
   }
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_TRUE(Exec(command, &error_msg));
   }
   EXPECT_EQ(0U, error_msg.size()) << error_msg;
@@ -50,8 +52,10 @@
   std::vector<std::string> command;
   command.push_back("bogus");
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_FALSE(error_msg.empty());
   }
@@ -72,8 +76,10 @@
   }
   command.push_back(kModifiedVariable);
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_NE(0U, error_msg.size()) << error_msg;
   }
@@ -97,8 +103,10 @@
   }
   command.push_back(kDeletedVariable);
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_TRUE(Exec(command, &error_msg));
     EXPECT_EQ(0U, error_msg.size()) << error_msg;
   }
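
Note: kRunningOnMemoryTool is a compile-time constant, unlike the old
RUNNING_ON_MEMORY_TOOL macro, which expanded to a runtime Valgrind query, so
the guarded branch folds away entirely in non-sanitized builds. A standalone
approximation (the feature test below is an assumption, not ART's real
memory-tool header):

    #include <cstdio>

    #if defined(__has_feature)
    #  if __has_feature(address_sanitizer)
    #    define SKETCH_ASAN 1
    #  endif
    #endif
    #ifndef SKETCH_ASAN
    #  define SKETCH_ASAN 0
    #endif

    constexpr bool kRunningOnMemoryTool = (SKETCH_ASAN == 1);
    constexpr bool kMemoryToolDetectsLeaks = kRunningOnMemoryTool;

    int main() {
      if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
        std::puts("leak-sensitive subtest enabled");  // Branch removed under ASan.
      }
      return 0;
    }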
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 150fe95..30213d5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -625,7 +625,7 @@
 
   // If true, check that the returned memory is actually zero.
   static constexpr bool kCheckZeroMemory = kIsDebugBuild;
-  // Valgrind protects memory, so do not check memory when running under valgrind. In a normal
+  // Do not check memory when running under a memory tool. In a normal
   // build with kCheckZeroMemory the whole test should be optimized away.
   // TODO: Unprotect before checks.
   ALWAYS_INLINE bool ShouldCheckZeroMemory();
@@ -768,7 +768,7 @@
   // greater than or equal to this value, release pages.
   const size_t page_release_size_threshold_;
 
-  // Whether this allocator is running under Valgrind.
+  // Whether this allocator is running on a memory tool.
   bool is_running_on_memory_tool_;
 
   // The base address of the memory region that's managed by this allocator.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 948d233..6756868 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -272,7 +272,7 @@
     }
     case kAllocatorTypeRosAlloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind or asan, we should be using the instrumented path.
+        // If running on ASan, we should be using the instrumented path.
         size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                                max_bytes_tl_bulk_allocated,
@@ -303,7 +303,7 @@
     }
     case kAllocatorTypeDlMalloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind, we should be using the instrumented path.
+        // If running on ASan, we should be using the instrumented path.
         ret = dlmalloc_space_->Alloc(self,
                                      alloc_size,
                                      bytes_allocated,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b004566..12021b7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2248,7 +2248,8 @@
       // Add a new bin with the remaining space.
       AddBin(size - alloc_size, pos + alloc_size);
     }
-    // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
+    // Copy the object over to its new location.
+    // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
     if (kUseBakerReadBarrier) {
       obj->AssertReadBarrierState();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 512cde4..a24ca32 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,8 +45,9 @@
   }
 
   ~MemoryToolLargeObjectMapSpace() OVERRIDE {
-    // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
-    // freed since they are held live by the class linker.
+    // Historical note: We were deleting large objects to keep Valgrind happy if there were
+    // any large objects, such as Dex cache arrays, which are never freed since they are held
+    // live by the class linker.
     MutexLock mu(Thread::Current(), lock_);
     for (auto& m : large_objects_) {
       delete m.second.mem_map;
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index 8282f3d..c022171 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -30,11 +30,14 @@
 namespace memory_tool_details {
 
 template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
-inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
-                                         size_t bytes_allocated, size_t usable_size,
-                                         size_t bytes_tl_bulk_allocated,
-                                         size_t* bytes_allocated_out, size_t* usable_size_out,
-                                         size_t* bytes_tl_bulk_allocated_out) {
+inline mirror::Object* AdjustForMemoryTool(void* obj_with_rdz,
+                                           size_t num_bytes,
+                                           size_t bytes_allocated,
+                                           size_t usable_size,
+                                           size_t bytes_tl_bulk_allocated,
+                                           size_t* bytes_allocated_out,
+                                           size_t* usable_size_out,
+                                           size_t* bytes_tl_bulk_allocated_out) {
   if (bytes_allocated_out != nullptr) {
     *bytes_allocated_out = bytes_allocated;
   }
@@ -84,24 +87,31 @@
           bool kUseObjSizeForUsable>
 mirror::Object*
 MemoryToolMallocSpace<S,
-                    kMemoryToolRedZoneBytes,
-                    kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::AllocWithGrowth(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                      kMemoryToolRedZoneBytes,
+                      kAdjustForRedzoneInAllocSize,
+                      kUseObjSizeForUsable>::AllocWithGrowth(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                          &bytes_allocated, &usable_size,
+  void* obj_with_rdz = S::AllocWithGrowth(self,
+                                          num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                          &bytes_allocated,
+                                          &usable_size,
                                           &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
-      obj_with_rdz, num_bytes,
-      bytes_allocated, usable_size,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
       bytes_tl_bulk_allocated,
       bytes_allocated_out,
       usable_size_out,
@@ -113,27 +123,35 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::Alloc(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::Alloc(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
+  void* obj_with_rdz = S::Alloc(self,
+                                num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                &bytes_allocated,
+                                &usable_size,
+                                &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes,
-                                             kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
-                                                                   bytes_allocated, usable_size,
-                                                                   bytes_tl_bulk_allocated,
-                                                                   bytes_allocated_out,
-                                                                   usable_size_out,
-                                                                   bytes_tl_bulk_allocated_out);
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
+      bytes_tl_bulk_allocated,
+      bytes_allocated_out,
+      usable_size_out,
+      bytes_tl_bulk_allocated_out);
 }
 
 template <typename S,
@@ -141,24 +159,31 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::AllocThreadUnsafe(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::AllocThreadUnsafe(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                            &bytes_allocated, &usable_size,
+  void* obj_with_rdz = S::AllocThreadUnsafe(self,
+                                            num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                            &bytes_allocated,
+                                            &usable_size,
                                             &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
-      obj_with_rdz, num_bytes,
-      bytes_allocated, usable_size,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
       bytes_tl_bulk_allocated,
       bytes_allocated_out,
       usable_size_out,
@@ -170,12 +195,14 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::AllocationSize(
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::AllocationSize(
     mirror::Object* obj, size_t* usable_size) {
-  size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
+  size_t result = S::AllocationSize(
+      reinterpret_cast<mirror::Object*>(
+          reinterpret_cast<uint8_t*>(obj)
+              - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
       usable_size);
   if (usable_size != nullptr) {
     if (kUseObjSizeForUsable) {
@@ -192,10 +219,9 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::Free(
-    Thread* self, mirror::Object* ptr) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::Free(Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
   uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
 
@@ -220,10 +246,10 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::FreeList(
-    Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::FreeList(
+                                 Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
   size_t freed = 0;
   for (size_t i = 0; i < num_ptrs; i++) {
     freed += Free(self, ptrs[i]);
@@ -238,11 +264,12 @@
           bool kUseObjSizeForUsable>
 template <typename... Params>
 MemoryToolMallocSpace<S,
-                    kMemoryToolRedZoneBytes,
-                    kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::MemoryToolMallocSpace(
-    MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
-  // Don't want to change the valgrind states of the mem map here as the allocator is already
+                      kMemoryToolRedZoneBytes,
+                      kAdjustForRedzoneInAllocSize,
+                      kUseObjSizeForUsable>::MemoryToolMallocSpace(
+                          MemMap* mem_map, size_t initial_size, Params... params)
+                          : S(mem_map, initial_size, params...) {
+  // Don't want to change the memory tool states of the mem map here as the allocator is already
   // initialized at this point and that may interfere with what the allocator does internally. Note
   // that the tail beyond the initial size is mprotected.
 }
@@ -252,9 +279,9 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
   return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
 }
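
Note: all of the Alloc variants above share the same red-zone bookkeeping:
over-allocate by 2 * kMemoryToolRedZoneBytes and hand out a pointer just past
the front red zone; Free() reverses the adjustment. A standalone model of that
arithmetic (not the ART allocator; kRedZoneBytes stands in for
kMemoryToolRedZoneBytes):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    constexpr size_t kRedZoneBytes = 8;  // Illustrative width.

    // Mirrors Alloc(): layout is [red zone][payload][red zone].
    void* AllocWithRedZones(size_t num_bytes) {
      uint8_t* obj_with_rdz =
          static_cast<uint8_t*>(std::malloc(num_bytes + 2 * kRedZoneBytes));
      return obj_with_rdz == nullptr ? nullptr : obj_with_rdz + kRedZoneBytes;
    }

    // Mirrors Free(): step back over the front red zone before releasing.
    void FreeWithRedZones(void* obj_after_rdz) {
      if (obj_after_rdz == nullptr) return;
      std::free(static_cast<uint8_t*>(obj_after_rdz) - kRedZoneBytes);
    }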
 
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index e786536..d698cf2 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -77,7 +77,7 @@
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There is currently some issues with
+  // TODO: Fix RosAllocSpace to support ASan. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (running_on_memory_tool) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
@@ -382,12 +382,12 @@
   size_t size = obj->SizeOf<kVerifyNone>();
   bool add_redzones = false;
   if (kMaybeIsRunningOnMemoryTool) {
-    add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
+    add_redzones = kRunningOnMemoryTool ? kMemoryToolAddsRedzones : 0;
     if (add_redzones) {
       size += 2 * kDefaultMemoryToolRedZoneBytes;
     }
   } else {
-    DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
+    DCHECK(!kRunningOnMemoryTool);
   }
   size_t size_by_size = rosalloc_->UsableSize(size);
   if (kIsDebugBuild) {
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9d16b87..4c17233 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -159,8 +159,8 @@
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                         size_t maximum_size, bool low_memory_mode) OVERRIDE {
-    return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
-                          RUNNING_ON_MEMORY_TOOL != 0);
+    return CreateRosAlloc(
+        base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
   }
   static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                              size_t maximum_size, bool low_memory_mode,
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 46630db..464c2b7 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -90,16 +90,24 @@
 DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
 #define LOCK_WORD_STATE_SHIFT 30
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
-#define LOCK_WORD_STATE_MASK 0xc0000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
+#define LOCK_WORD_STATE_MASK_SHIFTED 0xc0000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
 #define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
 #define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
 #define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SIZE 12
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SIZE), (static_cast<int32_t>(art::LockWord::kThinLockCountSize)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SHIFT 16
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SHIFT), (static_cast<int32_t>(art::LockWord::kThinLockCountShift)))
+#define LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED 0xfff0000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockCountMaskShifted)))
+#define LOCK_WORD_THIN_LOCK_COUNT_ONE 0x10000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<uint32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED 0xffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockOwnerMaskShifted)))
 #define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
 #define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
@@ -110,6 +118,8 @@
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
 #define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
+#define LOCK_WORD_GC_STATE_SIZE 2
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SIZE), (static_cast<int32_t>(art::LockWord::kGCStateSize)))
 #define LOCK_WORD_GC_STATE_SHIFT 28
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
 #define LOCK_WORD_MARK_BIT_SHIFT 29
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 791ebf0..0e429a6 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -510,7 +510,7 @@
     result->SetZ(false);
     return;
   }
-  mirror::String* class_name = nullptr;
+  ObjPtr<mirror::String> class_name = nullptr;
   if (!annotations::GetInnerClass(klass, &class_name)) {
     result->SetZ(false);
     return;
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index fd43562..aeb5f4b 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -864,11 +864,6 @@
 }
 
 TEST_F(UnstartedRuntimeTest, Pow) {
-  // Valgrind seems to get this wrong, actually. Disable for valgrind.
-  if (RUNNING_ON_MEMORY_TOOL != 0 && kMemoryToolIsValgrind) {
-    return;
-  }
-
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 86e69f4..736729c 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -332,7 +332,7 @@
     }
 
     // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
-    if (!RUNNING_ON_MEMORY_TOOL) {
+    if (!kRunningOnMemoryTool) {
       pool->StopWorkers(self);
       pool->RemoveAllTasks(self);
     }
@@ -473,11 +473,10 @@
       return false;
     }
 
-    CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    CodeInfo code_info(osr_method);
 
     // Find stack map starting at the target dex_pc.
-    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
+    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
     if (!stack_map.IsValid()) {
       // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
       // hope that the next branch has one.
@@ -494,7 +493,7 @@
     // We found a stack map, now fill the frame with dex register values from the interpreter's
     // shadow frame.
     DexRegisterMap vreg_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
 
     frame_size = osr_method->GetFrameSizeInBytes();
 
@@ -516,7 +515,7 @@
     } else {
       for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
         DexRegisterLocation::Kind location =
-            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
         if (location == DexRegisterLocation::Kind::kNone) {
           // Dex register is dead or uninitialized.
           continue;
@@ -532,15 +531,14 @@
         int32_t vreg_value = shadow_frame->GetVReg(vreg);
         int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                              number_of_vregs,
-                                                             code_info,
-                                                             encoding);
+                                                             code_info);
         DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
         DCHECK_GT(slot_offset, 0);
         (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
       }
     }
 
-    native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
+    native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
         osr_method->GetEntryPoint();
     VLOG(jit) << "Jumping to "
               << method_name
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 09d856f..ce7fe34 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -75,16 +75,18 @@
     // Remaining bits are the recursive lock count.
     kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
         kMarkBitStateSize,
-    // Thin lock bits. Owner in lowest bits.
 
+    // Thin lock bits. Owner in lowest bits.
     kThinLockOwnerShift = 0,
     kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
+    kThinLockOwnerMaskShifted = kThinLockOwnerMask << kThinLockOwnerShift,
     kThinLockMaxOwner = kThinLockOwnerMask,
     // Count in higher bits.
     kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
     kThinLockCountMask = (1 << kThinLockCountSize) - 1,
     kThinLockMaxCount = kThinLockCountMask,
     kThinLockCountOne = 1 << kThinLockCountShift,  // == 65536 (0x10000)
+    kThinLockCountMaskShifted = kThinLockCountMask << kThinLockCountShift,
 
     // State in the highest bits.
     kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift +
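
Note: the two new shifted masks are pure arithmetic over the field widths
above (16 owner bits in the low word, 12 count bits above them). A standalone
recomputation that reproduces the asm_support_gen.h values in this patch:

    #include <cstdint>

    constexpr int kThinLockOwnerSize = 16;
    constexpr int kThinLockCountSize = 12;  // 32 - 16 owner - 2 state - 1 RB - 1 mark bit.
    constexpr int kThinLockOwnerShift = 0;
    constexpr int kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift;  // 16.

    constexpr uint32_t kThinLockOwnerMaskShifted =
        ((1u << kThinLockOwnerSize) - 1u) << kThinLockOwnerShift;
    constexpr uint32_t kThinLockCountMaskShifted =
        ((1u << kThinLockCountSize) - 1u) << kThinLockCountShift;
    constexpr uint32_t kThinLockCountOne = 1u << kThinLockCountShift;

    static_assert(kThinLockOwnerMaskShifted == 0xffffu, "owner: low 16 bits");
    static_assert(kThinLockCountMaskShifted == 0xfff0000u, "count: bits 16..27");
    static_assert(kThinLockCountOne == 0x10000u, "one recursion == 65536");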
diff --git a/runtime/method_info.h b/runtime/method_info.h
index b00ddc6..6f74678 100644
--- a/runtime/method_info.h
+++ b/runtime/method_info.h
@@ -21,7 +21,7 @@
 
 #include "base/leb128.h"
 #include "base/macros.h"
-#include "base/memory_region.h"
+#include "base/bit_memory_region.h"
 
 namespace art {
 
@@ -35,8 +35,8 @@
   explicit MethodInfo(const uint8_t* ptr) {
     if (ptr != nullptr) {
       num_method_indices_ = DecodeUnsignedLeb128(&ptr);
-      region_ = MemoryRegion(const_cast<uint8_t*>(ptr),
-                             num_method_indices_ * sizeof(MethodIndexType));
+      region_ = BitMemoryRegion(
+          MemoryRegion(const_cast<uint8_t*>(ptr), num_method_indices_ * sizeof(MethodIndexType)));
     }
   }
 
@@ -44,7 +44,7 @@
   MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
     DCHECK(ptr != nullptr);
     ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
-    region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType));
+    region_ = BitMemoryRegion(MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType)));
   }
 
   static size_t ComputeSize(size_t num_method_indices) {
@@ -71,7 +71,7 @@
 
  private:
   size_t num_method_indices_ = 0u;
-  MemoryRegion region_;
+  BitMemoryRegion region_;
 };
 
 }  // namespace art
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index d31e06c..44c819a 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1545,6 +1545,37 @@
   return GetMethodTypeForAccessMode(self, this, access_mode);
 }
 
+std::string VarHandle::PrettyDescriptorForAccessMode(AccessMode access_mode) {
+  // Effect MethodType::PrettyDescriptor() without first creating a method type.
+  std::ostringstream oss;
+  oss << '(';
+
+  AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
+  ObjPtr<Class> var_type = GetVarType();
+  ObjPtr<Class> ctypes[2] = { GetCoordinateType0(), GetCoordinateType1() };
+  const int32_t ptypes_count = GetNumberOfParameters(access_mode_template, ctypes[0], ctypes[1]);
+  int32_t ptypes_done = 0;
+  for (ObjPtr<Class> ctype : ctypes) {
+    if (!ctype.IsNull()) {
+      if (ptypes_done != 0) {
+        oss << ", ";
+      }
+      oss << ctype->PrettyDescriptor();
+      ptypes_done++;
+    }
+  }
+  while (ptypes_done != ptypes_count) {
+    if (ptypes_done != 0) {
+      oss << ", ";
+    }
+    oss << var_type->PrettyDescriptor();
+    ptypes_done++;
+  }
+  ObjPtr<Class> rtype = GetReturnType(access_mode_template, var_type);
+  oss << ')' << rtype->PrettyDescriptor();
+  return oss.str();
+}
+
 bool VarHandle::Access(AccessMode access_mode,
                        ShadowFrame* shadow_frame,
                        const InstructionOperands* const operands,
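
Note: PrettyDescriptorForAccessMode() assembles the usual "(P1, P2, ...)R"
MethodType shape: explicit coordinate types first, then the variable type
repeated for any remaining parameter slots, then the return type. A standalone
sketch of the same loop, with plain strings standing in for ObjPtr<Class> and
PrettyDescriptor():

    #include <sstream>
    #include <string>
    #include <vector>

    std::string PrettyDescriptor(const std::vector<std::string>& coordinate_types,
                                 int parameter_count,
                                 const std::string& var_type,
                                 const std::string& return_type) {
      std::ostringstream oss;
      oss << '(';
      int done = 0;
      for (const std::string& ctype : coordinate_types) {  // Coordinates come first.
        if (done != 0) oss << ", ";
        oss << ctype;
        ++done;
      }
      while (done != parameter_count) {  // Pad remaining slots with the var type.
        if (done != 0) oss << ", ";
        oss << var_type;
        ++done;
      }
      oss << ')' << return_type;
      return oss.str();
    }

    // E.g. PrettyDescriptor({"java.lang.String[]", "int"}, 3, "java.lang.String", "void")
    // yields "(java.lang.String[], int, java.lang.String)void".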
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index eb3704e..5186d43 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -124,6 +124,11 @@
   MethodType* GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Returns a string representing the descriptor of the MethodType associated with
+  // this AccessMode.
+  std::string PrettyDescriptorForAccessMode(AccessMode access_mode)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   bool Access(AccessMode access_mode,
               ShadowFrame* shadow_frame,
               const InstructionOperands* const operands,
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 68024cd..9f595b1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -648,7 +648,7 @@
     // Return an empty array instead of a null pointer.
     ObjPtr<mirror::Class>  annotation_array_class =
         soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
-    mirror::ObjectArray<mirror::Object>* empty_array =
+    ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
         mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
                                                    annotation_array_class.Ptr(),
                                                    0);
@@ -661,7 +661,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  mirror::ObjectArray<mirror::Class>* classes = nullptr;
+  ObjPtr<mirror::ObjectArray<mirror::Class>> classes = nullptr;
   if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
     classes = annotations::GetDeclaredClasses(klass);
   }
@@ -738,7 +738,7 @@
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
-  mirror::String* class_name = nullptr;
+  ObjPtr<mirror::String> class_name = nullptr;
   if (!annotations::GetInnerClass(klass, &class_name)) {
     return nullptr;
   }
@@ -763,7 +763,7 @@
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
-  mirror::String* class_name = nullptr;
+  ObjPtr<mirror::String> class_name = nullptr;
   if (!annotations::GetInnerClass(klass, &class_name)) {
     return false;
   }
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index a5d6c97..13a8d28 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -38,7 +38,7 @@
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
       ->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  mirror::ObjectArray<mirror::Class>* result_array =
+  ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
       annotations::GetExceptionTypesForMethod(method);
   if (result_array == nullptr) {
     // Return an empty array instead of a null pointer.
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2503b3c..52e0494 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -62,7 +62,7 @@
         klass->GetProxyThrows()->Get(throws_index);
     return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
   } else {
-    mirror::ObjectArray<mirror::Class>* result_array =
+    ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
         annotations::GetExceptionTypesForMethod(method);
     if (result_array == nullptr) {
       // Return an empty array instead of a null pointer
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 14f3f45..b3a47c3 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -289,8 +289,10 @@
                      ArtMethod* current_method,
                      void* ucontext_ptr,
                      bool skip_frames) {
-  // b/18119146
-  if (RUNNING_ON_MEMORY_TOOL != 0) {
+  // Historical note: This was disabled when running under Valgrind (b/18119146).
+  // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+  // check whether this test works with ASan.
+  if (kRunningOnMemoryTool) {
     return;
   }
 
diff --git a/runtime/oat.h b/runtime/oat.h
index 6c683f1..7b8f71a 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: compiler support const-method-handle
-  static constexpr uint8_t kOatVersion[] = { '1', '4', '3', '\0' };
+  // Last oat version changed reason: Refactor stackmap encoding.
+  static constexpr uint8_t kOatVersion[] = { '1', '4', '4', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 98238e5..aed6bc5 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -19,6 +19,7 @@
 #include "art_method.h"
 #include "dex/dex_file_types.h"
 #include "scoped_thread_state_change-inl.h"
+#include "stack_map.h"
 #include "thread.h"
 
 namespace art {
@@ -42,11 +43,10 @@
   const void* entry_point = GetEntryPoint();
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
   if (IsOptimized()) {
-    CodeInfo code_info = GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+    CodeInfo code_info(this);
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
     if (stack_map.IsValid()) {
-      return stack_map.GetDexPc(encoding.stack_map.encoding);
+      return stack_map.GetDexPc();
     }
   } else {
     DCHECK(method->IsNative());
@@ -71,18 +71,17 @@
   DCHECK(!method->IsNative());
   DCHECK(IsOptimized());
   // Search for the dex-to-pc mapping in stack maps.
-  CodeInfo code_info = GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  CodeInfo code_info(this);
 
   // All stack maps are stored in the same CodeItem section, safepoint stack
   // maps first, then catch stack maps. We use `is_for_catch_handler` to select
   // the order of iteration.
   StackMap stack_map =
-      LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
-                                   : code_info.GetStackMapForDexPc(dex_pc, encoding);
+      LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc)
+                                   : code_info.GetStackMapForDexPc(dex_pc);
   if (stack_map.IsValid()) {
     return reinterpret_cast<uintptr_t>(entry_point) +
-           stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA);
+           stack_map.GetNativePcOffset(kRuntimeISA);
   }
   if (abort_on_failure) {
     ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index f0966b7..d6762d6 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -22,7 +22,6 @@
 #include "base/utils.h"
 #include "method_info.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack_map.h"
 
 namespace art {
 
@@ -75,10 +74,6 @@
     return code_ - vmap_table_offset_;
   }
 
-  CodeInfo GetOptimizedCodeInfo() const {
-    return CodeInfo(GetOptimizedCodeInfoPtr());
-  }
-
   const void* GetOptimizedMethodInfoPtr() const {
     DCHECK(IsOptimized());
     return reinterpret_cast<const void*>(code_ - method_info_offset_);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 077aa33..c555fca 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -224,30 +224,29 @@
 
   CodeItemDataAccessor accessor(handler_method_->DexInstructionData());
   const size_t number_of_vregs = accessor.RegistersSize();
-  CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  CodeInfo code_info(handler_method_header_);
 
   // Find stack map of the catch block.
-  StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
+  StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc());
   DCHECK(catch_stack_map.IsValid());
   DexRegisterMap catch_vreg_map =
-      code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+      code_info.GetDexRegisterMapOf(catch_stack_map, number_of_vregs);
   if (!catch_vreg_map.IsValid()) {
     return;
   }
 
   // Find stack map of the throwing instruction.
   StackMap throw_stack_map =
-      code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset(), encoding);
+      code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset());
   DCHECK(throw_stack_map.IsValid());
   DexRegisterMap throw_vreg_map =
-      code_info.GetDexRegisterMapOf(throw_stack_map, encoding, number_of_vregs);
+      code_info.GetDexRegisterMapOf(throw_stack_map, number_of_vregs);
   DCHECK(throw_vreg_map.IsValid());
 
   // Copy values between them.
   for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
     DexRegisterLocation::Kind catch_location =
-        catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+        catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
     if (catch_location == DexRegisterLocation::Kind::kNone) {
       continue;
     }
@@ -257,8 +256,7 @@
     uint32_t vreg_value;
     VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg,
                                                                    number_of_vregs,
-                                                                   code_info,
-                                                                   encoding));
+                                                                   code_info));
     bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
                                                    vreg,
                                                    vreg_kind,
@@ -271,8 +269,7 @@
     // Copy value to the catch phi's stack slot.
     int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg,
                                                                number_of_vregs,
-                                                               code_info,
-                                                               encoding);
+                                                               code_info);
     ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame();
     uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset;
     uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address);
@@ -404,20 +401,18 @@
                                       const bool* updated_vregs)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-    CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+    CodeInfo code_info(method_header);
     uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
     CodeItemDataAccessor accessor(m->DexInstructionData());
     const size_t number_of_vregs = accessor.RegistersSize();
-    uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
-    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+    uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     DexRegisterMap vreg_map = IsInInlinedFrame()
         ? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
-                                             code_info.GetInlineInfoOf(stack_map, encoding),
-                                             encoding,
+                                             code_info.GetInlineInfoOf(stack_map),
                                              number_of_vregs)
-        : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+        : code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
 
     if (!vreg_map.IsValid()) {
       return;
@@ -430,7 +425,7 @@
       }
 
       DexRegisterLocation::Kind location =
-          vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+          vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
       static constexpr uint32_t kDeadValue = 0xEBADDE09;
       uint32_t value = kDeadValue;
       bool is_reference = false;
@@ -439,12 +434,11 @@
         case DexRegisterLocation::Kind::kInStack: {
           const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                                 number_of_vregs,
-                                                                code_info,
-                                                                encoding);
+                                                                code_info);
           const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
           value = *reinterpret_cast<const uint32_t*>(addr);
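+          // Each stack mask bit covers one 4-byte frame slot, hence offset >> 2.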
           uint32_t bit = (offset >> 2);
-          if (bit < encoding.stack_mask.encoding.BitSize() && stack_mask.LoadBit(bit)) {
+          if (bit < code_info.GetNumberOfStackMaskBits() && stack_mask.LoadBit(bit)) {
             is_reference = true;
           }
           break;
@@ -453,7 +447,7 @@
         case DexRegisterLocation::Kind::kInRegisterHigh:
         case DexRegisterLocation::Kind::kInFpuRegister:
         case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
-          uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+          uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info);
           bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
           CHECK(result);
           if (location == DexRegisterLocation::Kind::kInRegister) {
@@ -464,7 +458,7 @@
           break;
         }
         case DexRegisterLocation::Kind::kConstant: {
-          value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+          value = vreg_map.GetConstant(vreg, number_of_vregs, code_info);
           if (value == 0) {
             // Make it a reference for extra safety.
             is_reference = true;
@@ -479,8 +473,7 @@
               << "Unexpected location kind "
               << vreg_map.GetLocationInternalKind(vreg,
                                                   number_of_vregs,
-                                                  code_info,
-                                                  encoding);
+                                                  code_info);
           UNREACHABLE();
         }
       }
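
Note: with `CodeInfoEncoding` removed, every `DexRegisterMap` accessor above takes only the owning `CodeInfo`. A minimal sketch of the resulting call pattern, using only names visible in this change; `method_header`, `native_pc_offset`, `vreg`, and `number_of_vregs` stand in for the caller's locals:

    CodeInfo code_info(method_header);
    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
    DexRegisterMap map = code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
    if (map.IsValid() &&
        map.GetLocationKind(vreg, number_of_vregs, code_info) ==
            DexRegisterLocation::Kind::kConstant) {
      // Constant vregs are read directly from the map.
      int32_t value = map.GetConstant(vreg, number_of_vregs, code_info);
    }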
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 4584351..374591e 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -19,8 +19,10 @@
 
 #include "runtime.h"
 
+#include "arch/instruction_set.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
 #include "obj_ptr-inl.h"
 
@@ -38,21 +40,22 @@
 
 inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method) {
   DCHECK(method != nullptr);
+  DCHECK_EQ(instruction_set_, kRuntimeISA);
   // Cannot be imt-conflict-method or resolution-method.
   DCHECK_NE(method, GetImtConflictMethod());
   DCHECK_NE(method, GetResolutionMethod());
   // Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
   if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsAndArgs)) {
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   } else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveAllCalleeSaves)) {
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
   } else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsOnly)) {
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
   } else {
     DCHECK(method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverything) ||
            method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverythingForClinit) ||
            method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverythingForSuspendCheck));
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveEverything);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveEverything);
   }
 }
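
Note: `RuntimeCalleeSaveFrame::GetMethodFrameInfo()` replaces the per-`Runtime` table lookup and, as the stack.cc hunk below shows, is usable in constant expressions. A hedged sketch; `FrameSizeInBytes()` is assumed from the wider `QuickMethodFrameInfo` API, which this diff does not show:

    constexpr QuickMethodFrameInfo kRefsAndArgs =
        RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
    // The frame size is now a compile-time constant.
    static_assert(kRefsAndArgs.FrameSizeInBytes() > 0, "expected a non-empty frame");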
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4142cb0..9196eb2 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -39,18 +39,12 @@
 #include "android-base/strings.h"
 
 #include "aot_class_linker.h"
-#include "arch/arm/quick_method_frame_info_arm.h"
 #include "arch/arm/registers_arm.h"
-#include "arch/arm64/quick_method_frame_info_arm64.h"
 #include "arch/arm64/registers_arm64.h"
 #include "arch/instruction_set_features.h"
-#include "arch/mips/quick_method_frame_info_mips.h"
 #include "arch/mips/registers_mips.h"
-#include "arch/mips64/quick_method_frame_info_mips64.h"
 #include "arch/mips64/registers_mips64.h"
-#include "arch/x86/quick_method_frame_info_x86.h"
 #include "arch/x86/registers_x86.h"
-#include "arch/x86_64/quick_method_frame_info_x86_64.h"
 #include "arch/x86_64/registers_x86_64.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
@@ -245,7 +239,7 @@
       exit_(nullptr),
       abort_(nullptr),
       stats_enabled_(false),
-      is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
+      is_running_on_memory_tool_(kRunningOnMemoryTool),
       instrumentation_(),
       main_thread_group_(nullptr),
       system_thread_group_(nullptr),
@@ -1355,8 +1349,10 @@
     case InstructionSet::kMips:
     case InstructionSet::kMips64:
       implicit_null_checks_ = true;
-      // Installing stack protection does not play well with valgrind.
-      implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
+      // Installing stack protection does not play well with Valgrind.
+      // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+      // check whether setting `implicit_so_checks_` to `true` works with ASan.
+      implicit_so_checks_ = !kRunningOnMemoryTool;
       break;
     default:
       // Keep the defaults.
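
Note: `kRunningOnMemoryTool` replaces the `RUNNING_ON_MEMORY_TOOL` macro with a compile-time constant. A hypothetical sketch of such a flag; the actual definition lives outside this diff:

    // Illustrative only; not taken from this change.
    #ifdef ADDRESS_SANITIZER
    static constexpr bool kRunningOnMemoryTool = true;
    #else
    static constexpr bool kRunningOnMemoryTool = false;
    #endif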
@@ -1371,8 +1367,8 @@
       // These need to be in a specific order.  The null pointer check handler must be
       // after the suspend check and stack overflow check handlers.
       //
-      // Note: the instances attach themselves to the fault manager and are handled by it. The manager
-      //       will delete the instance on Shutdown().
+      // Note: the instances attach themselves to the fault manager and are handled by it. The
+      //       manager will delete the instance on Shutdown().
       if (implicit_suspend_checks_) {
         new SuspensionHandler(&fault_manager);
       }
@@ -2203,38 +2199,21 @@
 
 void Runtime::SetInstructionSet(InstructionSet instruction_set) {
   instruction_set_ = instruction_set;
-  if ((instruction_set_ == InstructionSet::kThumb2) || (instruction_set_ == InstructionSet::kArm)) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kMips) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kMips64) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kX86) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kX86_64) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kArm64) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
-    }
-  } else {
-    UNIMPLEMENTED(FATAL) << instruction_set_;
+  switch (instruction_set) {
+    case InstructionSet::kThumb2:
+      // kThumb2 is the same as kArm; use the canonical value.
+      instruction_set_ = InstructionSet::kArm;
+      break;
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
+      break;
+    default:
+      UNIMPLEMENTED(FATAL) << instruction_set_;
+      UNREACHABLE();
   }
 }
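
Note: the setter now canonicalizes the ISA instead of precomputing callee-save frame tables. A usage sketch, assuming the existing `Runtime::GetInstructionSet()` getter:

    Runtime* runtime = Runtime::Current();
    runtime->SetInstructionSet(InstructionSet::kThumb2);
    // Thumb2 is stored as its canonical value.
    DCHECK_EQ(runtime->GetInstructionSet(), InstructionSet::kArm);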
 
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 953acbb..10f72e7 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -399,10 +399,6 @@
   ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
-    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
-  }
-
   QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -831,7 +827,6 @@
   GcRoot<mirror::Object> sentinel_;
 
   InstructionSet instruction_set_;
-  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
 
   CompilerCallbacks* compiler_callbacks_;
   bool is_zygote_;
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 72d9919..54769f9 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -339,8 +339,8 @@
 };
 
 TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
-  // SigQuit induces a dump. ASAN isn't happy with libunwind reading memory.
-  TEST_DISABLED_FOR_MEMORY_TOOL_ASAN();
+  // SigQuit induces a dump. ASan isn't happy with libunwind reading memory.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
 
   // The runtime needs to be started for the signal handler.
   Thread* self = Thread::Current();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 229238e..7d1cb5c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -25,6 +25,7 @@
 #include "base/hex_dump.h"
 #include "dex/dex_file_types.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space-inl.h"
@@ -75,15 +76,14 @@
   }
 }
 
-static InlineInfo GetCurrentInlineInfo(const OatQuickMethodHeader* method_header,
+static InlineInfo GetCurrentInlineInfo(CodeInfo& code_info,
+                                       const OatQuickMethodHeader* method_header,
                                        uintptr_t cur_quick_frame_pc)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
-  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   DCHECK(stack_map.IsValid());
-  return code_info.GetInlineInfoOf(stack_map, encoding);
+  return code_info.GetInlineInfoOf(stack_map);
 }
 
 ArtMethod* StackVisitor::GetMethod() const {
@@ -92,16 +92,16 @@
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
       size_t depth_in_stack_map = current_inlining_depth_ - 1;
-      InlineInfo inline_info = GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(),
-                                                    cur_quick_frame_pc_);
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
+      CodeInfo code_info(method_header);
+      InlineInfo inline_info = GetCurrentInlineInfo(code_info,
+                                                    method_header,
+                                                    cur_quick_frame_pc_);
       MethodInfo method_info = method_header->GetOptimizedMethodInfo();
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
       return GetResolvedMethod(*GetCurrentQuickFrame(),
                                method_info,
                                inline_info,
-                               encoding.inline_info.encoding,
                                depth_in_stack_map);
     } else {
       return *cur_quick_frame_;
@@ -115,11 +115,11 @@
     return cur_shadow_frame_->GetDexPC();
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
-      size_t depth_in_stack_map = current_inlining_depth_ - 1;
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
-      return GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), cur_quick_frame_pc_).
-          GetDexPcAtDepth(encoding.inline_info.encoding, depth_in_stack_map);
+      CodeInfo code_info(method_header);
+      size_t depth_in_stack_map = current_inlining_depth_ - 1;
+      return GetCurrentInlineInfo(code_info, method_header, cur_quick_frame_pc_).
+          GetDexPcAtDepth(depth_in_stack_map);
     } else if (cur_oat_quick_method_header_ == nullptr) {
       return dex::kDexNoIndex;
     } else {
@@ -229,32 +229,29 @@
   uint16_t number_of_dex_registers = accessor.RegistersSize();
   DCHECK_LT(vreg, number_of_dex_registers);
   const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  CodeInfo code_info(method_header);
 
   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   DCHECK(stack_map.IsValid());
   size_t depth_in_stack_map = current_inlining_depth_ - 1;
 
   DexRegisterMap dex_register_map = IsInInlinedFrame()
       ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
-                                           code_info.GetInlineInfoOf(stack_map, encoding),
-                                           encoding,
+                                           code_info.GetInlineInfoOf(stack_map),
                                            number_of_dex_registers)
-      : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+      : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
 
   if (!dex_register_map.IsValid()) {
     return false;
   }
   DexRegisterLocation::Kind location_kind =
-      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
+      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
   switch (location_kind) {
     case DexRegisterLocation::Kind::kInStack: {
       const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
                                                                     number_of_dex_registers,
-                                                                    code_info,
-                                                                    encoding);
+                                                                    code_info);
       const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
       *val = *reinterpret_cast<const uint32_t*>(addr);
       return true;
@@ -264,11 +261,11 @@
     case DexRegisterLocation::Kind::kInFpuRegister:
     case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
       uint32_t reg =
-          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
+          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
       return GetRegisterIfAccessible(reg, kind, val);
     }
     case DexRegisterLocation::Kind::kConstant:
-      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
+      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
       return true;
     case DexRegisterLocation::Kind::kNone:
       return false;
@@ -277,8 +274,7 @@
           << "Unexpected location kind "
           << dex_register_map.GetLocationInternalKind(vreg,
                                                       number_of_dex_registers,
-                                                      code_info,
-                                                      encoding);
+                                                      code_info);
       UNREACHABLE();
   }
 }
@@ -718,7 +714,7 @@
   Runtime* runtime = Runtime::Current();
 
   if (method->IsAbstract()) {
-    return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   }
 
   // This goes before IsProxyMethod since runtime methods have a null declaring class.
@@ -732,7 +728,7 @@
     // compiled method without any stubs. Therefore the method must have an OatQuickMethodHeader.
     DCHECK(!method->IsDirect() && !method->IsConstructor())
         << "Constructors of proxy classes must have a OatQuickMethodHeader";
-    return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   }
 
   // The only remaining case is if the method is native and uses the generic JNI stub,
@@ -751,8 +747,8 @@
   // Generic JNI frame.
   uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
   size_t scope_size = HandleScope::SizeOf(handle_refs);
-  QuickMethodFrameInfo callee_info =
-      runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+  constexpr QuickMethodFrameInfo callee_info =
+      RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
 
   // Callee saves + handle scope + method ref + alignment
   // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
@@ -830,15 +826,14 @@
         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
             && (cur_oat_quick_method_header_ != nullptr)
             && cur_oat_quick_method_header_->IsOptimized()) {
-          CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
-          CodeInfoEncoding encoding = code_info.ExtractEncoding();
+          CodeInfo code_info(cur_oat_quick_method_header_);
           uint32_t native_pc_offset =
               cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
-          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
-          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+          if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
+            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
             DCHECK_EQ(current_inlining_depth_, 0u);
-            for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
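+            // Inlined frames are visited innermost (deepest) first.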
+            for (current_inlining_depth_ = inline_info.GetDepth();
                  current_inlining_depth_ != 0;
                  --current_inlining_depth_) {
               bool should_continue = VisitFrame();
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 9c7b687..2b7e8dd 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -25,8 +25,6 @@
 namespace art {
 
 constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex;
-constexpr uint32_t StackMap::kNoDexRegisterMap;
-constexpr uint32_t StackMap::kNoInlineInfo;
 
 std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind& kind) {
   using Kind = DexRegisterLocation::Kind;
@@ -56,27 +54,25 @@
 DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(
     uint16_t dex_register_number,
     uint16_t number_of_dex_registers,
-    const CodeInfo& code_info,
-    const CodeInfoEncoding& enc) const {
+    const CodeInfo& code_info) const {
   DexRegisterLocationCatalog dex_register_location_catalog =
-      code_info.GetDexRegisterLocationCatalog(enc);
+      code_info.GetDexRegisterLocationCatalog();
   size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
       dex_register_number,
       number_of_dex_registers,
-      code_info.GetNumberOfLocationCatalogEntries(enc));
+      code_info.GetNumberOfLocationCatalogEntries());
   return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
 }
 
 DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
                                                            uint16_t number_of_dex_registers,
-                                                           const CodeInfo& code_info,
-                                                           const CodeInfoEncoding& enc) const {
+                                                           const CodeInfo& code_info) const {
   DexRegisterLocationCatalog dex_register_location_catalog =
-      code_info.GetDexRegisterLocationCatalog(enc);
+      code_info.GetDexRegisterLocationCatalog();
   size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
       dex_register_number,
       number_of_dex_registers,
-      code_info.GetNumberOfLocationCatalogEntries(enc));
+      code_info.GetNumberOfLocationCatalogEntries());
   return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
 }
 
@@ -90,27 +86,28 @@
      << " (" << location.GetValue() << ")" << suffix << '\n';
 }
 
-void StackMapEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void StackMap::DumpEncoding(const BitTable<6>& table,
+                            VariableIndentationOutputStream* vios) {
   vios->Stream()
       << "StackMapEncoding"
-      << " (native_pc_bit_offset=" << static_cast<uint32_t>(kNativePcBitOffset)
-      << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
-      << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
-      << ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_)
-      << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_index_bit_offset_)
-      << ", stack_mask_index_bit_offset=" << static_cast<uint32_t>(stack_mask_index_bit_offset_)
-      << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+      << " (NativePcOffsetBits=" << table.NumColumnBits(kNativePcOffset)
+      << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+      << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
+      << ", InlineInfoIndexBits=" << table.NumColumnBits(kInlineInfoIndex)
+      << ", RegisterMaskIndexBits=" << table.NumColumnBits(kRegisterMaskIndex)
+      << ", StackMaskIndexBits=" << table.NumColumnBits(kStackMaskIndex)
       << ")\n";
 }
 
-void InlineInfoEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void InlineInfo::DumpEncoding(const BitTable<5>& table,
+                              VariableIndentationOutputStream* vios) {
   vios->Stream()
       << "InlineInfoEncoding"
-      << " (method_index_bit_offset=" << static_cast<uint32_t>(kMethodIndexBitOffset)
-      << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
-      << ", extra_data_bit_offset=" << static_cast<uint32_t>(extra_data_bit_offset_)
-      << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
-      << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+      << " (IsLastBits=" << table.NumColumnBits(kIsLast)
+      << ", MethodIndexIdxBits=" << table.NumColumnBits(kMethodIndexIdx)
+      << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+      << ", ExtraDataBits=" << table.NumColumnBits(kExtraData)
+      << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
       << ")\n";
 }
 
@@ -120,26 +117,24 @@
                     bool dump_stack_maps,
                     InstructionSet instruction_set,
                     const MethodInfo& method_info) const {
-  CodeInfoEncoding encoding = ExtractEncoding();
-  size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
+  size_t number_of_stack_maps = GetNumberOfStackMaps();
   vios->Stream()
       << "Optimized CodeInfo (number_of_dex_registers=" << number_of_dex_registers
       << ", number_of_stack_maps=" << number_of_stack_maps
       << ")\n";
   ScopedIndentation indent1(vios);
-  encoding.stack_map.encoding.Dump(vios);
-  if (HasInlineInfo(encoding)) {
-    encoding.inline_info.encoding.Dump(vios);
+  StackMap::DumpEncoding(stack_maps_, vios);
+  if (HasInlineInfo()) {
+    InlineInfo::DumpEncoding(inline_infos_, vios);
   }
   // Display the Dex register location catalog.
-  GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
+  GetDexRegisterLocationCatalog().Dump(vios, *this);
   // Display stack maps along with (live) Dex register maps.
   if (dump_stack_maps) {
     for (size_t i = 0; i < number_of_stack_maps; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
+      StackMap stack_map = GetStackMapAt(i);
       stack_map.Dump(vios,
                      *this,
-                     encoding,
                      method_info,
                      code_offset,
                      number_of_dex_registers,
@@ -153,9 +148,8 @@
 
 void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
                                       const CodeInfo& code_info) {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
-  size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
+  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
+  size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize();
   vios->Stream()
       << "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
       << ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n";
@@ -169,8 +163,7 @@
 void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
                           const CodeInfo& code_info,
                           uint16_t number_of_dex_registers) const {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   // TODO: Display the bit mask of live Dex registers.
   for (size_t j = 0; j < number_of_dex_registers; ++j) {
     if (IsDexRegisterLive(j)) {
@@ -178,8 +171,7 @@
           j, number_of_dex_registers, number_of_location_catalog_entries);
       DexRegisterLocation location = GetDexRegisterLocation(j,
                                                             number_of_dex_registers,
-                                                            code_info,
-                                                            encoding);
+                                                            code_info);
       ScopedIndentation indent1(vios);
       DumpRegisterMapping(
           vios->Stream(), j, location, "v",
@@ -190,38 +182,35 @@
 
 void StackMap::Dump(VariableIndentationOutputStream* vios,
                     const CodeInfo& code_info,
-                    const CodeInfoEncoding& encoding,
                     const MethodInfo& method_info,
                     uint32_t code_offset,
                     uint16_t number_of_dex_registers,
                     InstructionSet instruction_set,
                     const std::string& header_suffix) const {
-  StackMapEncoding stack_map_encoding = encoding.stack_map.encoding;
-  const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
+  const uint32_t pc_offset = GetNativePcOffset(instruction_set);
   vios->Stream()
       << "StackMap" << header_suffix
       << std::hex
       << " [native_pc=0x" << code_offset + pc_offset << "]"
-      << " [entry_size=0x" << encoding.stack_map.encoding.BitSize() << " bits]"
-      << " (dex_pc=0x" << GetDexPc(stack_map_encoding)
+      << " (dex_pc=0x" << GetDexPc()
       << ", native_pc_offset=0x" << pc_offset
-      << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
-      << ", inline_info_offset=0x" << GetInlineInfoIndex(stack_map_encoding)
-      << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
+      << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset()
+      << ", inline_info_offset=0x" << GetInlineInfoIndex()
+      << ", register_mask=0x" << code_info.GetRegisterMaskOf(*this)
       << std::dec
       << ", stack_mask=0b";
-  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
-  for (size_t i = 0, e = encoding.stack_mask.encoding.BitSize(); i < e; ++i) {
+  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(*this);
+  for (size_t i = 0, e = code_info.GetNumberOfStackMaskBits(); i < e; ++i) {
     vios->Stream() << stack_mask.LoadBit(e - i - 1);
   }
   vios->Stream() << ")\n";
-  if (HasDexRegisterMap(stack_map_encoding)) {
+  if (HasDexRegisterMap()) {
     DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
-        *this, encoding, number_of_dex_registers);
+        *this, number_of_dex_registers);
     dex_register_map.Dump(vios, code_info, number_of_dex_registers);
   }
-  if (HasInlineInfo(stack_map_encoding)) {
-    InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding);
+  if (HasInlineInfo()) {
+    InlineInfo inline_info = code_info.GetInlineInfoOf(*this);
     // We do not know the length of the dex register maps of inlined frames
     // at this level, so we just pass null to `InlineInfo::Dump` to tell
     // it not to look at these maps.
@@ -233,29 +222,27 @@
                       const CodeInfo& code_info,
                       const MethodInfo& method_info,
                       uint16_t number_of_dex_registers[]) const {
-  InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
   vios->Stream() << "InlineInfo with depth "
-                 << static_cast<uint32_t>(GetDepth(inline_info_encoding))
+                 << static_cast<uint32_t>(GetDepth())
                  << "\n";
 
-  for (size_t i = 0; i < GetDepth(inline_info_encoding); ++i) {
+  for (size_t i = 0; i < GetDepth(); ++i) {
     vios->Stream()
         << " At depth " << i
         << std::hex
-        << " (dex_pc=0x" << GetDexPcAtDepth(inline_info_encoding, i);
-    if (EncodesArtMethodAtDepth(inline_info_encoding, i)) {
+        << " (dex_pc=0x" << GetDexPcAtDepth(i);
+    if (EncodesArtMethodAtDepth(i)) {
       ScopedObjectAccess soa(Thread::Current());
-      vios->Stream() << ", method=" << GetArtMethodAtDepth(inline_info_encoding, i)->PrettyMethod();
+      vios->Stream() << ", method=" << GetArtMethodAtDepth(i)->PrettyMethod();
     } else {
       vios->Stream()
           << std::dec
-          << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i);
+          << ", method_index=" << GetMethodIndexAtDepth(method_info, i);
     }
     vios->Stream() << ")\n";
-    if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) {
       DexRegisterMap dex_register_map =
-          code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
+          code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]);
       ScopedIndentation indent1(vios);
       dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]);
     }
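
Note: the stack_map.h hunks below convert `StackMap`, `InlineInfo`, and `InvokeInfo` into thin `BitTable` accessors: an enum names the columns, `kCount` gives their number, and `Get<kColumn>()` reads one field of the accessor's row. A minimal sketch of the pattern with a made-up two-column table (illustrative, not part of this change):

    class PcToIndex : public BitTable<2>::Accessor {
     public:
      enum Field {
        kPc,     // Column 0.
        kIndex,  // Column 1.
        kCount,  // Number of columns.
      };

      PcToIndex(const BitTable<kCount>* table, uint32_t row)
          : BitTable<kCount>::Accessor(table, row) {}

      uint32_t GetPc() const { return Get<kPc>(); }
      uint32_t GetIndex() const { return Get<kIndex>(); }
      bool HasIndex() const { return GetIndex() != kNoValue; }  // kNoValue marks absent fields.
    };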
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 3839764..91cecf0 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -21,12 +21,14 @@
 
 #include "arch/code_offset.h"
 #include "base/bit_memory_region.h"
+#include "base/bit_table.h"
 #include "base/bit_utils.h"
 #include "base/bit_vector.h"
 #include "base/leb128.h"
 #include "base/memory_region.h"
 #include "dex/dex_file_types.h"
 #include "method_info.h"
+#include "oat_quick_method_header.h"
 
 namespace art {
 
@@ -37,13 +39,8 @@
 // (signed) values.
 static constexpr ssize_t kFrameSlotSize = 4;
 
-// Size of Dex virtual registers.
-static constexpr size_t kVRegSize = 4;
-
 class ArtMethod;
 class CodeInfo;
-class StackMapEncoding;
-struct CodeInfoEncoding;
 
 /**
  * Classes in the following file are wrappers around stack map information backed
@@ -452,35 +449,31 @@
   explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
   DexRegisterMap() {}
 
-  bool IsValid() const { return region_.pointer() != nullptr; }
+  bool IsValid() const { return region_.IsValid(); }
 
   // Get the surface kind of Dex register `dex_register_number`.
   DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
                                             uint16_t number_of_dex_registers,
-                                            const CodeInfo& code_info,
-                                            const CodeInfoEncoding& enc) const {
+                                            const CodeInfo& code_info) const {
     return DexRegisterLocation::ConvertToSurfaceKind(
-        GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info, enc));
+        GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info));
   }
 
   // Get the internal kind of Dex register `dex_register_number`.
   DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
                                                     uint16_t number_of_dex_registers,
-                                                    const CodeInfo& code_info,
-                                                    const CodeInfoEncoding& enc) const;
+                                                    const CodeInfo& code_info) const;
 
   // Get the Dex register location `dex_register_number`.
   DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
                                              uint16_t number_of_dex_registers,
-                                             const CodeInfo& code_info,
-                                             const CodeInfoEncoding& enc) const;
+                                             const CodeInfo& code_info) const;
 
   int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
                                 uint16_t number_of_dex_registers,
-                                const CodeInfo& code_info,
-                                const CodeInfoEncoding& enc) const {
+                                const CodeInfo& code_info) const {
     DexRegisterLocation location =
-        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
     DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
     // GetDexRegisterLocation returns the offset in bytes.
     return location.GetValue();
@@ -488,20 +481,18 @@
 
   int32_t GetConstant(uint16_t dex_register_number,
                       uint16_t number_of_dex_registers,
-                      const CodeInfo& code_info,
-                      const CodeInfoEncoding& enc) const {
+                      const CodeInfo& code_info) const {
     DexRegisterLocation location =
-        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
     DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant);
     return location.GetValue();
   }
 
   int32_t GetMachineRegister(uint16_t dex_register_number,
                              uint16_t number_of_dex_registers,
-                             const CodeInfo& code_info,
-                             const CodeInfoEncoding& enc) const {
+                             const CodeInfo& code_info) const {
     DexRegisterLocation location =
-        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
     DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
            location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
            location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
@@ -627,7 +618,7 @@
 
   // Return the size of the DexRegisterMap object, in bytes.
   size_t Size() const {
-    return region_.size();
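+    // The backing region is bit-sized now; round the bit count up to whole bytes.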
+    return BitsToBytesRoundUp(region_.size_in_bits());
   }
 
   void Dump(VariableIndentationOutputStream* vios,
@@ -650,143 +641,12 @@
 
   static constexpr int kFixedSize = 0;
 
-  MemoryRegion region_;
+  BitMemoryRegion region_;
 
   friend class CodeInfo;
   friend class StackMapStream;
 };
 
-// Represents bit range of bit-packed integer field.
-// We reuse the idea from ULEB128p1 to support encoding of -1 (aka 0xFFFFFFFF).
-// If min_value is set to -1, we implicitly subtract one from any loaded value,
-// and add one to any stored value. This is generalized to any negative values.
-// In other words, min_value acts as a base and the stored value is added to it.
-struct FieldEncoding {
-  FieldEncoding(size_t start_offset, size_t end_offset, int32_t min_value = 0)
-      : start_offset_(start_offset), end_offset_(end_offset), min_value_(min_value) {
-    DCHECK_LE(start_offset_, end_offset_);
-    DCHECK_LE(BitSize(), 32u);
-  }
-
-  ALWAYS_INLINE size_t BitSize() const { return end_offset_ - start_offset_; }
-
-  template <typename Region>
-  ALWAYS_INLINE int32_t Load(const Region& region) const {
-    DCHECK_LE(end_offset_, region.size_in_bits());
-    return static_cast<int32_t>(region.LoadBits(start_offset_, BitSize())) + min_value_;
-  }
-
-  template <typename Region>
-  ALWAYS_INLINE void Store(Region region, int32_t value) const {
-    region.StoreBits(start_offset_, value - min_value_, BitSize());
-    DCHECK_EQ(Load(region), value);
-  }
-
- private:
-  size_t start_offset_;
-  size_t end_offset_;
-  int32_t min_value_;
-};
-
-class StackMapEncoding {
- public:
-  StackMapEncoding()
-      : dex_pc_bit_offset_(0),
-        dex_register_map_bit_offset_(0),
-        inline_info_bit_offset_(0),
-        register_mask_index_bit_offset_(0),
-        stack_mask_index_bit_offset_(0),
-        total_bit_size_(0) {}
-
-  // Set stack map bit layout based on given sizes.
-  // Returns the size of stack map in bits.
-  size_t SetFromSizes(size_t native_pc_max,
-                      size_t dex_pc_max,
-                      size_t dex_register_map_size,
-                      size_t number_of_inline_info,
-                      size_t number_of_register_masks,
-                      size_t number_of_stack_masks) {
-    total_bit_size_ = 0;
-    DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(native_pc_max);
-
-    dex_pc_bit_offset_ = total_bit_size_;
-    // Note: We're not encoding the dex pc if there is none. That's the case
-    // for an intrinsified native method, such as String.charAt().
-    if (dex_pc_max != dex::kDexNoIndex) {
-      total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
-    }
-
-    // We also need +1 for kNoDexRegisterMap, but since the size is strictly
-    // greater than any offset we might try to encode, we already implicitly have it.
-    dex_register_map_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
-
-    // We also need +1 for kNoInlineInfo, but since the inline_info_size is strictly
-    // greater than the offset we might try to encode, we already implicitly have it.
-    // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
-    inline_info_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(number_of_inline_info);
-
-    register_mask_index_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
-
-    stack_mask_index_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(number_of_stack_masks);
-
-    return total_bit_size_;
-  }
-
-  ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
-    return FieldEncoding(kNativePcBitOffset, dex_pc_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
-    return FieldEncoding(dex_pc_bit_offset_, dex_register_map_bit_offset_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
-    return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetInlineInfoEncoding() const {
-    return FieldEncoding(inline_info_bit_offset_,
-                         register_mask_index_bit_offset_,
-                         -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetRegisterMaskIndexEncoding() const {
-    return FieldEncoding(register_mask_index_bit_offset_, stack_mask_index_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetStackMaskIndexEncoding() const {
-    return FieldEncoding(stack_mask_index_bit_offset_, total_bit_size_);
-  }
-  ALWAYS_INLINE size_t BitSize() const {
-    return total_bit_size_;
-  }
-
-  // Encode the encoding into the vector.
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    static_assert(alignof(StackMapEncoding) == 1, "Should not require alignment");
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
-    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
-  }
-
-  // Decode the encoding from a pointer, updates the pointer.
-  void Decode(const uint8_t** ptr) {
-    *this = *reinterpret_cast<const StackMapEncoding*>(*ptr);
-    *ptr += sizeof(*this);
-  }
-
-  void Dump(VariableIndentationOutputStream* vios) const;
-
- private:
-  static constexpr size_t kNativePcBitOffset = 0;
-  uint8_t dex_pc_bit_offset_;
-  uint8_t dex_register_map_bit_offset_;
-  uint8_t inline_info_bit_offset_;
-  uint8_t register_mask_index_bit_offset_;
-  uint8_t stack_mask_index_bit_offset_;
-  uint8_t total_bit_size_;
-};
-
 /**
  * A Stack Map holds compilation information for a specific PC necessary for:
  * - Mapping it to a dex PC,
@@ -794,248 +654,101 @@
  * - Knowing which registers hold objects,
  * - Knowing the inlining information,
  * - Knowing the values of dex registers.
- *
- * The information is of the form:
- *
- *   [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_index, register_mask_index,
- *   stack_mask_index].
  */
-class StackMap {
+class StackMap : public BitTable<6>::Accessor {
  public:
-  StackMap() {}
-  explicit StackMap(BitMemoryRegion region) : region_(region) {}
+  enum Field {
+    kNativePcOffset,
+    kDexPc,
+    kDexRegisterMapOffset,
+    kInlineInfoIndex,
+    kRegisterMaskIndex,
+    kStackMaskIndex,
+    kCount,
+  };
 
-  ALWAYS_INLINE bool IsValid() const { return region_.pointer() != nullptr; }
+  StackMap() : BitTable<kCount>::Accessor(nullptr, -1) {}
+  StackMap(const BitTable<kCount>* table, uint32_t row)
+    : BitTable<kCount>::Accessor(table, row) {}
 
-  ALWAYS_INLINE uint32_t GetDexPc(const StackMapEncoding& encoding) const {
-    return encoding.GetDexPcEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
-    encoding.GetDexPcEncoding().Store(region_, dex_pc);
-  }
-
-  ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding,
-                                           InstructionSet instruction_set) const {
-    CodeOffset offset(
-        CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+  ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
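+    // The stored offset is compressed; expand it for the target instruction set.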
+    CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
     return offset.Uint32Value(instruction_set);
   }
 
-  ALWAYS_INLINE void SetNativePcCodeOffset(const StackMapEncoding& encoding,
-                                           CodeOffset native_pc_offset) {
-    encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
-  }
+  uint32_t GetDexPc() const { return Get<kDexPc>(); }
 
-  ALWAYS_INLINE uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
-    return encoding.GetDexRegisterMapEncoding().Load(region_);
-  }
+  uint32_t GetDexRegisterMapOffset() const { return Get<kDexRegisterMapOffset>(); }
+  bool HasDexRegisterMap() const { return GetDexRegisterMapOffset() != kNoValue; }
 
-  ALWAYS_INLINE void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
-    encoding.GetDexRegisterMapEncoding().Store(region_, offset);
-  }
+  uint32_t GetInlineInfoIndex() const { return Get<kInlineInfoIndex>(); }
+  bool HasInlineInfo() const { return GetInlineInfoIndex() != kNoValue; }
 
-  ALWAYS_INLINE uint32_t GetInlineInfoIndex(const StackMapEncoding& encoding) const {
-    return encoding.GetInlineInfoEncoding().Load(region_);
-  }
+  uint32_t GetRegisterMaskIndex() const { return Get<kRegisterMaskIndex>(); }
 
-  ALWAYS_INLINE void SetInlineInfoIndex(const StackMapEncoding& encoding, uint32_t index) {
-    encoding.GetInlineInfoEncoding().Store(region_, index);
-  }
+  uint32_t GetStackMaskIndex() const { return Get<kStackMaskIndex>(); }
 
-  ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
-    return encoding.GetRegisterMaskIndexEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetRegisterMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
-    encoding.GetRegisterMaskIndexEncoding().Store(region_, mask);
-  }
-
-  ALWAYS_INLINE uint32_t GetStackMaskIndex(const StackMapEncoding& encoding) const {
-    return encoding.GetStackMaskIndexEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetStackMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
-    encoding.GetStackMaskIndexEncoding().Store(region_, mask);
-  }
-
-  ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
-    return GetDexRegisterMapOffset(encoding) != kNoDexRegisterMap;
-  }
-
-  ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
-    return GetInlineInfoIndex(encoding) != kNoInlineInfo;
-  }
-
-  ALWAYS_INLINE bool Equals(const StackMap& other) const {
-    return region_.pointer() == other.region_.pointer() &&
-           region_.size() == other.region_.size() &&
-           region_.BitOffset() == other.region_.BitOffset();
-  }
-
+  static void DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios);
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& code_info,
-            const CodeInfoEncoding& encoding,
             const MethodInfo& method_info,
             uint32_t code_offset,
             uint16_t number_of_dex_registers,
             InstructionSet instruction_set,
             const std::string& header_suffix = "") const;
-
-  // Special (invalid) offset for the DexRegisterMapOffset field meaning
-  // that there is no Dex register map for this stack map.
-  static constexpr uint32_t kNoDexRegisterMap = -1;
-
-  // Special (invalid) offset for the InlineDescriptorOffset field meaning
-  // that there is no inline info for this stack map.
-  static constexpr uint32_t kNoInlineInfo = -1;
-
- private:
-  static constexpr int kFixedSize = 0;
-
-  BitMemoryRegion region_;
-
-  friend class StackMapStream;
-};
-
-class InlineInfoEncoding {
- public:
-  void SetFromSizes(size_t method_index_idx_max,
-                    size_t dex_pc_max,
-                    size_t extra_data_max,
-                    size_t dex_register_map_size) {
-    total_bit_size_ = kMethodIndexBitOffset;
-    total_bit_size_ += MinimumBitsToStore(method_index_idx_max);
-
-    dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
-    // Note: We're not encoding the dex pc if there is none. That's the case
-    // for an intrinsified native method, such as String.charAt().
-    if (dex_pc_max != dex::kDexNoIndex) {
-      total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
-    }
-
-    extra_data_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(extra_data_max);
-
-    // We also need +1 for kNoDexRegisterMap, but since the size is strictly
-    // greater than any offset we might try to encode, we already implicitly have it.
-    dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const {
-    return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
-    return FieldEncoding(dex_pc_bit_offset_, extra_data_bit_offset_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetExtraDataEncoding() const {
-    return FieldEncoding(extra_data_bit_offset_, dex_register_map_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
-    return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE size_t BitSize() const {
-    return total_bit_size_;
-  }
-
-  void Dump(VariableIndentationOutputStream* vios) const;
-
-  // Encode the encoding into the vector.
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    static_assert(alignof(InlineInfoEncoding) == 1, "Should not require alignment");
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
-    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
-  }
-
-  // Decode the encoding from a pointer, updates the pointer.
-  void Decode(const uint8_t** ptr) {
-    *this = *reinterpret_cast<const InlineInfoEncoding*>(*ptr);
-    *ptr += sizeof(*this);
-  }
-
- private:
-  static constexpr uint8_t kIsLastBitOffset = 0;
-  static constexpr uint8_t kMethodIndexBitOffset = 1;
-  uint8_t dex_pc_bit_offset_;
-  uint8_t extra_data_bit_offset_;
-  uint8_t dex_register_map_bit_offset_;
-  uint8_t total_bit_size_;
 };
 
 /**
- * Inline information for a specific PC. The information is of the form:
- *
- *   [is_last,
- *    method_index (or ArtMethod high bits),
- *    dex_pc,
- *    extra_data (ArtMethod low bits or 1),
- *    dex_register_map_offset]+.
+ * Inline information for a specific PC.
+ * The row referenced from the StackMap holds information at depth 0.
+ * Following rows hold information for further depths.
  */
-class InlineInfo {
+class InlineInfo : public BitTable<5>::Accessor {
  public:
-  explicit InlineInfo(BitMemoryRegion region) : region_(region) {}
+  enum Field {
+    kIsLast,  // Whether this is the last row (or rows for deeper depths follow).
+    kMethodIndexIdx,  // Method index or ArtMethod high bits.
+    kDexPc,
+    kExtraData,  // ArtMethod low bits or 1.
+    kDexRegisterMapOffset,
+    kCount,
+  };
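+  // Sentinels for the kIsLast column: kLast marks the final row of an inline
+  // chain; kMore means a row for a deeper depth follows.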
+  static constexpr uint32_t kLast = -1;
+  static constexpr uint32_t kMore = 0;
 
-  ALWAYS_INLINE uint32_t GetDepth(const InlineInfoEncoding& encoding) const {
+  InlineInfo(const BitTable<kCount>* table, uint32_t row)
+    : BitTable<kCount>::Accessor(table, row) {}
+
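+  // Rows of one inline chain are consecutive: depth `d` lives at `row_ + d`.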
+  ALWAYS_INLINE InlineInfo AtDepth(uint32_t depth) const {
+    return InlineInfo(table_, this->row_ + depth);
+  }
+
+  uint32_t GetDepth() const {
     size_t depth = 0;
-    while (!GetRegionAtDepth(encoding, depth++).LoadBit(0)) { }  // Check is_last bit.
+    while (AtDepth(depth++).Get<kIsLast>() == kMore) { }
     return depth;
   }
 
-  ALWAYS_INLINE void SetDepth(const InlineInfoEncoding& encoding, uint32_t depth) {
-    DCHECK_GT(depth, 0u);
-    for (size_t d = 0; d < depth; ++d) {
-      GetRegionAtDepth(encoding, d).StoreBit(0, d == depth - 1);  // Set is_last bit.
-    }
+  uint32_t GetMethodIndexIdxAtDepth(uint32_t depth) const {
+    DCHECK(!EncodesArtMethodAtDepth(depth));
+    return AtDepth(depth).Get<kMethodIndexIdx>();
   }
 
-  ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
-                                                  uint32_t depth) const {
-    DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
-    return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth));
+  uint32_t GetMethodIndexAtDepth(const MethodInfo& method_info, uint32_t depth) const {
+    return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(depth));
   }
 
-  ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
-                                              uint32_t depth,
-                                              uint32_t index) {
-    encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+  uint32_t GetDexPcAtDepth(uint32_t depth) const {
+    return AtDepth(depth).Get<kDexPc>();
   }
 
-
-  ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
-                                               const MethodInfo& method_info,
-                                               uint32_t depth) const {
-    return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth));
+  bool EncodesArtMethodAtDepth(uint32_t depth) const {
+    return (AtDepth(depth).Get<kExtraData>() & 1) == 0;
   }
 
-  ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding,
-                                         uint32_t depth) const {
-    return encoding.GetDexPcEncoding().Load(GetRegionAtDepth(encoding, depth));
-  }
-
-  ALWAYS_INLINE void SetDexPcAtDepth(const InlineInfoEncoding& encoding,
-                                     uint32_t depth,
-                                     uint32_t dex_pc) {
-    encoding.GetDexPcEncoding().Store(GetRegionAtDepth(encoding, depth), dex_pc);
-  }
-
-  ALWAYS_INLINE bool EncodesArtMethodAtDepth(const InlineInfoEncoding& encoding,
-                                             uint32_t depth) const {
-    return (encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth)) & 1) == 0;
-  }
-
-  ALWAYS_INLINE void SetExtraDataAtDepth(const InlineInfoEncoding& encoding,
-                                         uint32_t depth,
-                                         uint32_t extra_data) {
-    encoding.GetExtraDataEncoding().Store(GetRegionAtDepth(encoding, depth), extra_data);
-  }
-
-  ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
-                                               uint32_t depth) const {
-    uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
-    uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load(
-        GetRegionAtDepth(encoding, depth));
+  ArtMethod* GetArtMethodAtDepth(uint32_t depth) const {
+    uint32_t low_bits = AtDepth(depth).Get<kExtraData>();
+    uint32_t high_bits = AtDepth(depth).Get<kMethodIndexIdx>();
     if (high_bits == 0) {
       return reinterpret_cast<ArtMethod*>(low_bits);
     } else {
@@ -1045,411 +758,132 @@
     }
   }
 
-  ALWAYS_INLINE uint32_t GetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
-                                                        uint32_t depth) const {
-    return encoding.GetDexRegisterMapEncoding().Load(GetRegionAtDepth(encoding, depth));
+  uint32_t GetDexRegisterMapOffsetAtDepth(uint32_t depth) const {
+    return AtDepth(depth).Get<kDexRegisterMapOffset>();
   }
 
-  ALWAYS_INLINE void SetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
-                                                    uint32_t depth,
-                                                    uint32_t offset) {
-    encoding.GetDexRegisterMapEncoding().Store(GetRegionAtDepth(encoding, depth), offset);
+  bool HasDexRegisterMapAtDepth(uint32_t depth) const {
+    return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoValue;
   }
 
-  ALWAYS_INLINE bool HasDexRegisterMapAtDepth(const InlineInfoEncoding& encoding,
-                                              uint32_t depth) const {
-    return GetDexRegisterMapOffsetAtDepth(encoding, depth) != StackMap::kNoDexRegisterMap;
-  }
-
+  static void DumpEncoding(const BitTable<5>& table, VariableIndentationOutputStream* vios);
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& info,
             const MethodInfo& method_info,
             uint16_t* number_of_dex_registers) const;
-
- private:
-  ALWAYS_INLINE BitMemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
-                                                 uint32_t depth) const {
-    size_t entry_size = encoding.BitSize();
-    DCHECK_GT(entry_size, 0u);
-    return region_.Subregion(depth * entry_size, entry_size);
-  }
-
-  BitMemoryRegion region_;
 };
 
-// Bit sized region encoding, may be more than 255 bits.
-class BitRegionEncoding {
+class InvokeInfo : public BitTable<3>::Accessor {
  public:
-  uint32_t num_bits = 0;
+  enum Field {
+    kNativePcOffset,
+    kInvokeType,
+    kMethodIndexIdx,
+    kCount,
+  };
 
-  ALWAYS_INLINE size_t BitSize() const {
-    return num_bits;
-  }
+  InvokeInfo(const BitTable<kCount>* table, uint32_t row)
+    : BitTable<kCount>::Accessor(table, row) {}
 
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, num_bits);  // Use leb in case num_bits is greater than 255.
-  }
-
-  void Decode(const uint8_t** ptr) {
-    num_bits = DecodeUnsignedLeb128(ptr);
-  }
-};
-
-// A table of bit sized encodings.
-template <typename Encoding>
-struct BitEncodingTable {
-  static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
-  // How the encoding is laid out (serialized).
-  Encoding encoding;
-
-  // Number of entries in the table (serialized).
-  size_t num_entries;
-
-  // Bit offset for the base of the table (computed).
-  size_t bit_offset = kInvalidOffset;
-
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, num_entries);
-    encoding.Encode(dest);
-  }
-
-  ALWAYS_INLINE void Decode(const uint8_t** ptr) {
-    num_entries = DecodeUnsignedLeb128(ptr);
-    encoding.Decode(ptr);
-  }
-
-  // Set the bit offset in the table and adds the space used by the table to offset.
-  void UpdateBitOffset(size_t* offset) {
-    DCHECK(offset != nullptr);
-    bit_offset = *offset;
-    *offset += encoding.BitSize() * num_entries;
-  }
-
-  // Return the bit region for the map at index i.
-  ALWAYS_INLINE BitMemoryRegion BitRegion(MemoryRegion region, size_t index) const {
-    DCHECK_NE(bit_offset, kInvalidOffset) << "Invalid table offset";
-    DCHECK_LT(index, num_entries);
-    const size_t map_size = encoding.BitSize();
-    return BitMemoryRegion(region, bit_offset + index * map_size, map_size);
-  }
-};
-
-// A byte sized table of possible variable sized encodings.
-struct ByteSizedTable {
-  static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
-
-  // Number of entries in the table (serialized).
-  size_t num_entries = 0;
-
-  // Number of bytes of the table (serialized).
-  size_t num_bytes;
-
-  // Bit offset for the base of the table (computed).
-  size_t byte_offset = kInvalidOffset;
-
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, num_entries);
-    EncodeUnsignedLeb128(dest, num_bytes);
-  }
-
-  ALWAYS_INLINE void Decode(const uint8_t** ptr) {
-    num_entries = DecodeUnsignedLeb128(ptr);
-    num_bytes = DecodeUnsignedLeb128(ptr);
-  }
-
-  // Set the bit offset of the table. Adds the total bit size of the table to offset.
-  void UpdateBitOffset(size_t* offset) {
-    DCHECK(offset != nullptr);
-    DCHECK_ALIGNED(*offset, kBitsPerByte);
-    byte_offset = *offset / kBitsPerByte;
-    *offset += num_bytes * kBitsPerByte;
-  }
-};
-
-// Format is [native pc, invoke type, method index].
-class InvokeInfoEncoding {
- public:
-  void SetFromSizes(size_t native_pc_max,
-                    size_t invoke_type_max,
-                    size_t method_index_max) {
-    total_bit_size_ = 0;
-    DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(native_pc_max);
-    invoke_type_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(invoke_type_max);
-    method_index_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(method_index_max);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
-    return FieldEncoding(kNativePcBitOffset, invoke_type_bit_offset_);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetInvokeTypeEncoding() const {
-    return FieldEncoding(invoke_type_bit_offset_, method_index_bit_offset_);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const {
-    return FieldEncoding(method_index_bit_offset_, total_bit_size_);
-  }
-
-  ALWAYS_INLINE size_t BitSize() const {
-    return total_bit_size_;
-  }
-
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    static_assert(alignof(InvokeInfoEncoding) == 1, "Should not require alignment");
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
-    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
-  }
-
-  void Decode(const uint8_t** ptr) {
-    *this = *reinterpret_cast<const InvokeInfoEncoding*>(*ptr);
-    *ptr += sizeof(*this);
-  }
-
- private:
-  static constexpr uint8_t kNativePcBitOffset = 0;
-  uint8_t invoke_type_bit_offset_;
-  uint8_t method_index_bit_offset_;
-  uint8_t total_bit_size_;
-};
-
-class InvokeInfo {
- public:
-  explicit InvokeInfo(BitMemoryRegion region) : region_(region) {}
-
-  ALWAYS_INLINE uint32_t GetNativePcOffset(const InvokeInfoEncoding& encoding,
-                                           InstructionSet instruction_set) const {
-    CodeOffset offset(
-        CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+  ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+    CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
     return offset.Uint32Value(instruction_set);
   }
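(A hedged sketch for context: CodeOffset's internals are not shown in this diff, but the compression is roughly a scale-down by the ISA's instruction alignment, so decoding multiplies back up.)

// Rough model only; the real logic lives in CodeOffset.
inline uint32_t DecompressNativePc(uint32_t stored, InstructionSet isa) {
  return stored * GetInstructionSetInstructionAlignment(isa);
}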
 
-  ALWAYS_INLINE void SetNativePcCodeOffset(const InvokeInfoEncoding& encoding,
-                                           CodeOffset native_pc_offset) {
-    encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
+  uint32_t GetInvokeType() const { return Get<kInvokeType>(); }
+
+  uint32_t GetMethodIndexIdx() const { return Get<kMethodIndexIdx>(); }
+
+  uint32_t GetMethodIndex(MethodInfo method_info) const {
+    return method_info.GetMethodIndex(GetMethodIndexIdx());
   }
-
-  ALWAYS_INLINE uint32_t GetInvokeType(const InvokeInfoEncoding& encoding) const {
-    return encoding.GetInvokeTypeEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetInvokeType(const InvokeInfoEncoding& encoding, uint32_t invoke_type) {
-    encoding.GetInvokeTypeEncoding().Store(region_, invoke_type);
-  }
-
-  ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const {
-    return encoding.GetMethodIndexEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding,
-                                       uint32_t method_index_idx) {
-    encoding.GetMethodIndexEncoding().Store(region_, method_index_idx);
-  }
-
-  ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding,
-                                        MethodInfo method_info) const {
-    return method_info.GetMethodIndex(GetMethodIndexIdx(encoding));
-  }
-
-  bool IsValid() const { return region_.pointer() != nullptr; }
-
- private:
-  BitMemoryRegion region_;
-};
-
-// Most of the fields are encoded as ULEB128 to save space.
-struct CodeInfoEncoding {
-  using SizeType = uint32_t;
-
-  static constexpr SizeType kInvalidSize = std::numeric_limits<SizeType>::max();
-
-  // Byte sized tables go first to avoid unnecessary alignment bits.
-  ByteSizedTable dex_register_map;
-  ByteSizedTable location_catalog;
-  BitEncodingTable<StackMapEncoding> stack_map;
-  BitEncodingTable<BitRegionEncoding> register_mask;
-  BitEncodingTable<BitRegionEncoding> stack_mask;
-  BitEncodingTable<InvokeInfoEncoding> invoke_info;
-  BitEncodingTable<InlineInfoEncoding> inline_info;
-
-  CodeInfoEncoding() {}
-
-  explicit CodeInfoEncoding(const void* data) {
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
-    dex_register_map.Decode(&ptr);
-    location_catalog.Decode(&ptr);
-    stack_map.Decode(&ptr);
-    register_mask.Decode(&ptr);
-    stack_mask.Decode(&ptr);
-    invoke_info.Decode(&ptr);
-    if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      inline_info.Decode(&ptr);
-    } else {
-      inline_info = BitEncodingTable<InlineInfoEncoding>();
-    }
-    cache_header_size =
-        dchecked_integral_cast<SizeType>(ptr - reinterpret_cast<const uint8_t*>(data));
-    ComputeTableOffsets();
-  }
-
-  // Compress is not const since it calculates cache_header_size. This is used by PrepareForFillIn.
-  template<typename Vector>
-  void Compress(Vector* dest) {
-    dex_register_map.Encode(dest);
-    location_catalog.Encode(dest);
-    stack_map.Encode(dest);
-    register_mask.Encode(dest);
-    stack_mask.Encode(dest);
-    invoke_info.Encode(dest);
-    if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      inline_info.Encode(dest);
-    }
-    cache_header_size = dest->size();
-  }
-
-  ALWAYS_INLINE void ComputeTableOffsets() {
-    // Skip the header.
-    size_t bit_offset = HeaderSize() * kBitsPerByte;
-    // The byte tables must be aligned so they must go first.
-    dex_register_map.UpdateBitOffset(&bit_offset);
-    location_catalog.UpdateBitOffset(&bit_offset);
-    // Other tables don't require alignment.
-    stack_map.UpdateBitOffset(&bit_offset);
-    register_mask.UpdateBitOffset(&bit_offset);
-    stack_mask.UpdateBitOffset(&bit_offset);
-    invoke_info.UpdateBitOffset(&bit_offset);
-    inline_info.UpdateBitOffset(&bit_offset);
-    cache_non_header_size = RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte - HeaderSize();
-  }
-
-  ALWAYS_INLINE size_t HeaderSize() const {
-    DCHECK_NE(cache_header_size, kInvalidSize) << "Uninitialized";
-    return cache_header_size;
-  }
-
-  ALWAYS_INLINE size_t NonHeaderSize() const {
-    DCHECK_NE(cache_non_header_size, kInvalidSize) << "Uninitialized";
-    return cache_non_header_size;
-  }
-
- private:
-  // Computed fields (not serialized).
-  // Header size in bytes, cached to avoid needing to re-decoding the encoding in HeaderSize.
-  SizeType cache_header_size = kInvalidSize;
-  // Non header size in bytes, cached to avoid needing to re-decoding the encoding in NonHeaderSize.
-  SizeType cache_non_header_size = kInvalidSize;
 };
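(Usage sketch of the new accessor pattern, with hypothetical names: an accessor is a cheap (table, row) pair, and Get<kColumn>() reads one bit-packed column of that row. CountDirectInvokes and num_rows are illustrative; kDirect is art's invoke-type constant.)

uint32_t CountDirectInvokes(const CodeInfo& code_info, size_t num_rows) {
  uint32_t count = 0;
  for (size_t row = 0; row < num_rows; ++row) {
    if (code_info.GetInvokeInfo(row).GetInvokeType() == kDirect) {
      ++count;
    }
  }
  return count;
}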
 
 /**
  * Wrapper around all compiler information collected for a method.
  * The information is of the form:
  *
- *   [CodeInfoEncoding, DexRegisterMap+, DexLocationCatalog+, StackMap+, RegisterMask+, StackMask+,
- *    InlineInfo*]
+ *   [Header (ULEB128 non-header size), DexRegisterMap, DexLocationCatalog,
+ *    BitTable<StackMap>, BitTable<InvokeInfo>, BitTable<InlineInfo>,
+ *    BitTable<RegisterMask>, StackMask]
  *
- * where CodeInfoEncoding is of the form:
- *
- *   [ByteSizedTable(dex_register_map), ByteSizedTable(location_catalog),
- *    BitEncodingTable<StackMapEncoding>, BitEncodingTable<BitRegionEncoding>,
- *    BitEncodingTable<BitRegionEncoding>, BitEncodingTable<InlineInfoEncoding>]
  */
 class CodeInfo {
  public:
-  explicit CodeInfo(MemoryRegion region) : region_(region) {
-  }
-
   explicit CodeInfo(const void* data) {
-    CodeInfoEncoding encoding = CodeInfoEncoding(data);
-    region_ = MemoryRegion(const_cast<void*>(data),
-                           encoding.HeaderSize() + encoding.NonHeaderSize());
+    Decode(reinterpret_cast<const uint8_t*>(data));
   }
 
-  CodeInfoEncoding ExtractEncoding() const {
-    CodeInfoEncoding encoding(region_.begin());
-    AssertValidStackMap(encoding);
-    return encoding;
+  explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
+    DCHECK_EQ(size_, region.size());
   }
 
-  bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0;
+  explicit CodeInfo(const OatQuickMethodHeader* header)
+    : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
   }
 
-  DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
-    return DexRegisterLocationCatalog(region_.Subregion(encoding.location_catalog.byte_offset,
-                                                        encoding.location_catalog.num_bytes));
+  size_t Size() const {
+    return size_;
   }
 
-  ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_mask.encoding.BitSize();
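+  // The table stores each column in only as many bits as its largest value
+  // needs, so a zero-width inline-info index column means that no stack map
+  // in this method has inline info.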
+  bool HasInlineInfo() const {
+    return stack_maps_.NumColumnBits(StackMap::kInlineInfoIndex) != 0;
   }
 
-  ALWAYS_INLINE StackMap GetStackMapAt(size_t index, const CodeInfoEncoding& encoding) const {
-    return StackMap(encoding.stack_map.BitRegion(region_, index));
+  DexRegisterLocationCatalog GetDexRegisterLocationCatalog() const {
+    return DexRegisterLocationCatalog(location_catalog_);
   }
 
-  BitMemoryRegion GetStackMask(size_t index, const CodeInfoEncoding& encoding) const {
-    return encoding.stack_mask.BitRegion(region_, index);
+  ALWAYS_INLINE size_t GetNumberOfStackMaskBits() const {
+    return stack_mask_bits_;
   }
 
-  BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
-                                 const StackMap& stack_map) const {
-    return GetStackMask(stack_map.GetStackMaskIndex(encoding.stack_map.encoding), encoding);
+  ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const {
+    return StackMap(&stack_maps_, index);
   }
 
-  BitMemoryRegion GetRegisterMask(size_t index, const CodeInfoEncoding& encoding) const {
-    return encoding.register_mask.BitRegion(region_, index);
+  BitMemoryRegion GetStackMask(size_t index) const {
+    return stack_masks_.Subregion(index * stack_mask_bits_, stack_mask_bits_);
   }
 
-  uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
-    size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map.encoding);
-    return GetRegisterMask(index, encoding).LoadBits(0u, encoding.register_mask.encoding.BitSize());
+  BitMemoryRegion GetStackMaskOf(const StackMap& stack_map) const {
+    return GetStackMask(stack_map.GetStackMaskIndex());
   }
 
-  uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
-    return encoding.location_catalog.num_entries;
+  uint32_t GetRegisterMaskOf(const StackMap& stack_map) const {
+    return register_masks_.Get(stack_map.GetRegisterMaskIndex());
   }
 
-  uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
-    return encoding.location_catalog.num_bytes;
+  uint32_t GetNumberOfLocationCatalogEntries() const {
+    return location_catalog_entries_;
   }
 
-  uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map.num_entries;
+  uint32_t GetDexRegisterLocationCatalogSize() const {
+    return location_catalog_.size();
   }
 
-  // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
-  ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map.encoding.BitSize() * GetNumberOfStackMaps(encoding);
+  uint32_t GetNumberOfStackMaps() const {
+    return stack_maps_.NumRows();
   }
 
-  InvokeInfo GetInvokeInfo(const CodeInfoEncoding& encoding, size_t index) const {
-    return InvokeInfo(encoding.invoke_info.BitRegion(region_, index));
+  InvokeInfo GetInvokeInfo(size_t index) const {
+    return InvokeInfo(&invoke_infos_, index);
   }
 
   DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
-                                     const CodeInfoEncoding& encoding,
                                      size_t number_of_dex_registers) const {
-    if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+    if (!stack_map.HasDexRegisterMap()) {
       return DexRegisterMap();
     }
-    const uint32_t offset = encoding.dex_register_map.byte_offset +
-        stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding);
-    size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
-    return DexRegisterMap(region_.Subregion(offset, size));
+    const uint32_t offset = stack_map.GetDexRegisterMapOffset();
+    size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+    return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
   }
 
-  size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
-                                uint32_t number_of_dex_registers) const {
+  size_t GetDexRegisterMapsSize(uint32_t number_of_dex_registers) const {
     size_t total = 0;
-    for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      DexRegisterMap map(GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap stack_map = GetStackMapAt(i);
+      DexRegisterMap map(GetDexRegisterMapOf(stack_map, number_of_dex_registers));
       total += map.Size();
     }
     return total;
@@ -1458,38 +892,30 @@
   // Return the `DexRegisterMap` pointed by `inline_info` at depth `depth`.
   DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
                                           InlineInfo inline_info,
-                                          const CodeInfoEncoding& encoding,
                                           uint32_t number_of_dex_registers) const {
-    if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, depth)) {
+    if (!inline_info.HasDexRegisterMapAtDepth(depth)) {
       return DexRegisterMap();
     } else {
-      uint32_t offset = encoding.dex_register_map.byte_offset +
-          inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, depth);
-      size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
-      return DexRegisterMap(region_.Subregion(offset, size));
+      uint32_t offset = inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+      size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+      return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
     }
   }
 
-  InlineInfo GetInlineInfo(size_t index, const CodeInfoEncoding& encoding) const {
-    // Since we do not know the depth, we just return the whole remaining map. The caller may
-    // access the inline info for arbitrary depths. To return the precise inline info we would need
-    // to count the depth before returning.
-    // TODO: Clean this up.
-    const size_t bit_offset = encoding.inline_info.bit_offset +
-        index * encoding.inline_info.encoding.BitSize();
-    return InlineInfo(BitMemoryRegion(region_, bit_offset, region_.size_in_bits() - bit_offset));
+  InlineInfo GetInlineInfo(size_t index) const {
+    return InlineInfo(&inline_infos_, index);
   }
 
-  InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
-    DCHECK(stack_map.HasInlineInfo(encoding.stack_map.encoding));
-    uint32_t index = stack_map.GetInlineInfoIndex(encoding.stack_map.encoding);
-    return GetInlineInfo(index, encoding);
+  InlineInfo GetInlineInfoOf(StackMap stack_map) const {
+    DCHECK(stack_map.HasInlineInfo());
+    uint32_t index = stack_map.GetInlineInfoIndex();
+    return GetInlineInfo(index);
   }
 
-  StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
-    for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+  StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap stack_map = GetStackMapAt(i);
+      if (stack_map.GetDexPc() == dex_pc) {
         return stack_map;
       }
     }
@@ -1498,40 +924,39 @@
 
   // Searches the stack map list backwards because catch stack maps are stored
   // at the end.
-  StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
-    for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
-      StackMap stack_map = GetStackMapAt(i - 1, encoding);
-      if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+  StackMap GetCatchStackMapForDexPc(uint32_t dex_pc) const {
+    for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
+      StackMap stack_map = GetStackMapAt(i - 1);
+      if (stack_map.GetDexPc() == dex_pc) {
         return stack_map;
       }
     }
     return StackMap();
   }
 
-  StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
-    size_t e = GetNumberOfStackMaps(encoding);
+  StackMap GetOsrStackMapForDexPc(uint32_t dex_pc) const {
+    size_t e = GetNumberOfStackMaps();
     if (e == 0) {
       // There cannot be an OSR stack map if there are no stack maps.
       return StackMap();
     }
     // Walk over all stack maps. If two consecutive stack maps are identical, then we
     // have found a stack map suitable for OSR.
-    const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
     for (size_t i = 0; i < e - 1; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
-        StackMap other = GetStackMapAt(i + 1, encoding);
-        if (other.GetDexPc(stack_map_encoding) == dex_pc &&
-            other.GetNativePcOffset(stack_map_encoding, kRuntimeISA) ==
-                stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA)) {
-          DCHECK_EQ(other.GetDexRegisterMapOffset(stack_map_encoding),
-                    stack_map.GetDexRegisterMapOffset(stack_map_encoding));
-          DCHECK(!stack_map.HasInlineInfo(stack_map_encoding));
+      StackMap stack_map = GetStackMapAt(i);
+      if (stack_map.GetDexPc() == dex_pc) {
+        StackMap other = GetStackMapAt(i + 1);
+        if (other.GetDexPc() == dex_pc &&
+            other.GetNativePcOffset(kRuntimeISA) ==
+                stack_map.GetNativePcOffset(kRuntimeISA)) {
+          DCHECK_EQ(other.GetDexRegisterMapOffset(),
+                    stack_map.GetDexRegisterMapOffset());
+          DCHECK(!stack_map.HasInlineInfo());
           if (i < e - 2) {
             // Make sure there are not three identical stack maps following each other.
             DCHECK_NE(
-                stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA),
-                GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding, kRuntimeISA));
+                stack_map.GetNativePcOffset(kRuntimeISA),
+                GetStackMapAt(i + 2).GetNativePcOffset(kRuntimeISA));
           }
           return stack_map;
         }
@@ -1540,30 +965,27 @@
     return StackMap();
   }
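(The OSR condition above, restated as a predicate; editor's sketch, not part of the patch.)

// Two consecutive maps with the same dex pc and native pc mark an OSR entry.
bool IsOsrPair(const StackMap& a, const StackMap& b) {
  return a.GetDexPc() == b.GetDexPc() &&
         a.GetNativePcOffset(kRuntimeISA) == b.GetNativePcOffset(kRuntimeISA);
}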
 
-  StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
-                                        const CodeInfoEncoding& encoding) const {
+  StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
     // TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
     //       maps are not. If we knew that the method does not have try/catch,
     //       we could do binary search.
-    for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) ==
-          native_pc_offset) {
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap stack_map = GetStackMapAt(i);
+      if (stack_map.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
         return stack_map;
       }
     }
     return StackMap();
   }
 
-  InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset,
-                                            const CodeInfoEncoding& encoding) {
-    for (size_t index = 0; index < encoding.invoke_info.num_entries; index++) {
-      InvokeInfo item = GetInvokeInfo(encoding, index);
-      if (item.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA) == native_pc_offset) {
+  InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset) {
+    for (size_t index = 0; index < invoke_infos_.NumRows(); index++) {
+      InvokeInfo item = GetInvokeInfo(index);
+      if (item.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
         return item;
       }
     }
-    return InvokeInfo(BitMemoryRegion());
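+    // A row of -1 yields an accessor that reads as invalid, replacing the
+    // old empty-region sentinel.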
+    return InvokeInfo(&invoke_infos_, -1);
   }
 
   // Dump this CodeInfo object on `os`.  `code_offset` is the (absolute)
@@ -1578,23 +1000,10 @@
             InstructionSet instruction_set,
             const MethodInfo& method_info) const;
 
-  // Check that the code info has valid stack map and abort if it does not.
-  void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
-    if (region_.size() != 0 && region_.size_in_bits() < GetStackMapsSizeInBits(encoding)) {
-      LOG(FATAL) << region_.size() << "\n"
-                 << encoding.HeaderSize() << "\n"
-                 << encoding.NonHeaderSize() << "\n"
-                 << encoding.location_catalog.num_entries << "\n"
-                 << encoding.stack_map.num_entries << "\n"
-                 << encoding.stack_map.encoding.BitSize();
-    }
-  }
-
  private:
   // Compute the size of the Dex register map associated to the stack map at
   // `dex_register_map_offset_in_code_info`.
-  size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
-                                     uint32_t dex_register_map_offset_in_code_info,
+  size_t ComputeDexRegisterMapSizeOf(uint32_t dex_register_map_offset,
                                      uint16_t number_of_dex_registers) const {
     // Offset where the actual mapping data starts within art::DexRegisterMap.
     size_t location_mapping_data_offset_in_dex_register_map =
@@ -1602,12 +1011,12 @@
     // Create a temporary art::DexRegisterMap to be able to call
     // art::DexRegisterMap::GetNumberOfLiveDexRegisters on it.
     DexRegisterMap dex_register_map_without_locations(
-        MemoryRegion(region_.Subregion(dex_register_map_offset_in_code_info,
-                                       location_mapping_data_offset_in_dex_register_map)));
+        MemoryRegion(dex_register_maps_.Subregion(dex_register_map_offset,
+                                        location_mapping_data_offset_in_dex_register_map)));
     size_t number_of_live_dex_registers =
         dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
     size_t location_mapping_data_size_in_bits =
-        DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries(encoding))
+        DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries())
         * number_of_live_dex_registers;
     size_t location_mapping_data_size_in_bytes =
         RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -1616,37 +1025,42 @@
     return dex_register_map_size;
   }
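(Worked example with illustrative numbers; the exact per-entry width comes from SingleEntrySizeInBits.)

// With 5 live registers and a catalog needing 6 bits per index, the mapping
// data is 5 * 6 = 30 bits, i.e. RoundUp(30, kBitsPerByte) / kBitsPerByte = 4
// bytes, added after the fixed prefix and the live-register bitmap.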
 
-  // Compute the size of a Dex register location catalog starting at offset `origin`
-  // in `region_` and containing `number_of_dex_locations` entries.
-  size_t ComputeDexRegisterLocationCatalogSize(uint32_t origin,
-                                               uint32_t number_of_dex_locations) const {
-    // TODO: Ideally, we would like to use art::DexRegisterLocationCatalog::Size or
-    // art::DexRegisterLocationCatalog::FindLocationOffset, but the
-    // DexRegisterLocationCatalog is not yet built.  Try to factor common code.
-    size_t offset = origin + DexRegisterLocationCatalog::kFixedSize;
-
-    // Skip the first `number_of_dex_locations - 1` entries.
-    for (uint16_t i = 0; i < number_of_dex_locations; ++i) {
-      // Read the first next byte and inspect its first 3 bits to decide
-      // whether it is a short or a large location.
-      DexRegisterLocationCatalog::ShortLocation first_byte =
-          region_.LoadUnaligned<DexRegisterLocationCatalog::ShortLocation>(offset);
-      DexRegisterLocation::Kind kind =
-          DexRegisterLocationCatalog::ExtractKindFromShortLocation(first_byte);
-      if (DexRegisterLocation::IsShortLocationKind(kind)) {
-        // Short location.  Skip the current byte.
-        offset += DexRegisterLocationCatalog::SingleShortEntrySize();
-      } else {
-        // Large location.  Skip the 5 next bytes.
-        offset += DexRegisterLocationCatalog::SingleLargeEntrySize();
-      }
-    }
-    size_t size = offset - origin;
-    return size;
+  MemoryRegion DecodeMemoryRegion(MemoryRegion& region, size_t* bit_offset) {
+    size_t length = DecodeVarintBits(BitMemoryRegion(region), bit_offset);
+    size_t offset = BitsToBytesRoundUp(*bit_offset);
+    *bit_offset = (offset + length) * kBitsPerByte;
+    return region.Subregion(offset, length);
   }
 
-  MemoryRegion region_;
-  friend class StackMapStream;
+  void Decode(const uint8_t* data) {
+    size_t non_header_size = DecodeUnsignedLeb128(&data);
+    MemoryRegion region(const_cast<uint8_t*>(data), non_header_size);
+    BitMemoryRegion bit_region(region);
+    size_t bit_offset = 0;
+    size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
+    dex_register_maps_ = DecodeMemoryRegion(region, &bit_offset);
+    location_catalog_entries_ = DecodeVarintBits(bit_region, &bit_offset);
+    location_catalog_ = DecodeMemoryRegion(region, &bit_offset);
+    stack_maps_.Decode(bit_region, &bit_offset);
+    invoke_infos_.Decode(bit_region, &bit_offset);
+    inline_infos_.Decode(bit_region, &bit_offset);
+    register_masks_.Decode(bit_region, &bit_offset);
+    stack_mask_bits_ = DecodeVarintBits(bit_region, &bit_offset);
+    stack_masks_ = bit_region.Subregion(bit_offset, non_header_size * kBitsPerByte - bit_offset);
+  }
+
+  size_t size_;
+  MemoryRegion dex_register_maps_;
+  uint32_t location_catalog_entries_;
+  MemoryRegion location_catalog_;
+  BitTable<StackMap::Field::kCount> stack_maps_;
+  BitTable<InvokeInfo::Field::kCount> invoke_infos_;
+  BitTable<InlineInfo::Field::kCount> inline_infos_;
+  BitTable<1> register_masks_;
+  uint32_t stack_mask_bits_ = 0;
+  BitMemoryRegion stack_masks_;
+
+  friend class OatDumper;
 };
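(Putting Decode() together, the serialized shape it walks is roughly the following; the varint and BitTable wire formats themselves are assumed, not specified by this diff.)

// ULEB128  non_header_size
// varint   dex_register_maps length, then byte-aligned payload
// varint   location_catalog_entries
// varint   location_catalog length, then byte-aligned payload
// BitTable stack_maps, invoke_infos, inline_infos, register_masks
// varint   stack_mask_bits, then the stack-mask bits through the end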
 
 #undef ELEMENT_BYTE_OFFSET_AFTER
diff --git a/runtime/thread.cc b/runtime/thread.cc
index eada24d..2275dae 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1115,21 +1115,10 @@
   Runtime* runtime = Runtime::Current();
   bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
 
-  // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
-  // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
-  // stack_begin to 0.
-  const bool valgrind_on_arm =
-      (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) &&
-      kMemoryToolIsValgrind &&
-      RUNNING_ON_MEMORY_TOOL != 0;
-  if (valgrind_on_arm) {
-    tlsPtr_.stack_begin = nullptr;
-  }
-
   ResetDefaultStackEnd();
 
   // Install the protected region if we are doing implicit overflow checks.
-  if (implicit_stack_check && !valgrind_on_arm) {
+  if (implicit_stack_check) {
     // The thread might have a protected region at the bottom.  We need
     // to install our own region so we need to move the limits
     // of the stack to make room for it.
@@ -3559,16 +3548,15 @@
       StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
           reinterpret_cast<uintptr_t>(cur_quick_frame));
       uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
-      CodeInfo code_info = method_header->GetOptimizedCodeInfo();
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
-      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+      CodeInfo code_info(method_header);
+      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(map.IsValid());
 
-      T vreg_info(m, code_info, encoding, map, visitor_);
+      T vreg_info(m, code_info, map, visitor_);
 
       // Visit stack entries that hold pointers.
-      const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding);
-      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map);
+      const size_t number_of_bits = code_info.GetNumberOfStackMaskBits();
+      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
       for (size_t i = 0; i < number_of_bits; ++i) {
         if (stack_mask.LoadBit(i)) {
           StackReference<mirror::Object>* ref_addr = vreg_base + i;
@@ -3583,7 +3571,7 @@
         }
       }
       // Visit callee-save registers that hold pointers.
-      uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
+      uint32_t register_mask = code_info.GetRegisterMaskOf(map);
       for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
         if (register_mask & (1 << i)) {
           mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
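(The two mask loops above scan every bit index; a sparse walk is equivalent. CTZ is art's count-trailing-zeros helper; VisitSlot is a hypothetical stand-in for the per-reference visit logic.)

for (uint32_t mask = register_mask; mask != 0u; mask &= mask - 1u) {
  VisitSlot(CTZ(mask));  // Visit the lowest set bit's index, then clear it.
}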
@@ -3631,7 +3619,6 @@
     struct UndefinedVRegInfo {
       UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
                         const CodeInfo& code_info ATTRIBUTE_UNUSED,
-                        const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
                         const StackMap& map ATTRIBUTE_UNUSED,
                         RootVisitor& _visitor)
           : visitor(_visitor) {
@@ -3662,14 +3649,11 @@
     struct StackMapVRegInfo {
       StackMapVRegInfo(ArtMethod* method,
                        const CodeInfo& _code_info,
-                       const CodeInfoEncoding& _encoding,
                        const StackMap& map,
                        RootVisitor& _visitor)
           : number_of_dex_registers(method->DexInstructionData().RegistersSize()),
             code_info(_code_info),
-            encoding(_encoding),
             dex_register_map(code_info.GetDexRegisterMapOf(map,
-                                                           encoding,
                                                            number_of_dex_registers)),
             visitor(_visitor) {
       }
@@ -3684,7 +3668,7 @@
         bool found = false;
         for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
           DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
-              dex_reg, number_of_dex_registers, code_info, encoding);
+              dex_reg, number_of_dex_registers, code_info);
           if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
             visitor(ref, dex_reg, stack_visitor);
             found = true;
@@ -3718,7 +3702,6 @@
 
       size_t number_of_dex_registers;
       const CodeInfo& code_info;
-      const CodeInfoEncoding& encoding;
       DexRegisterMap dex_register_map;
       RootVisitor& visitor;
     };
diff --git a/runtime/var_handles.cc b/runtime/var_handles.cc
index e6730c6..f08742f 100644
--- a/runtime/var_handles.cc
+++ b/runtime/var_handles.cc
@@ -89,8 +89,8 @@
                                                   result);
   } else {
     DCHECK_EQ(match_kind, mirror::VarHandle::MatchKind::kNone);
-    ThrowWrongMethodTypeException(var_handle->GetMethodTypeForAccessMode(self, access_mode),
-                                  callsite_type.Get());
+    ThrowWrongMethodTypeException(var_handle->PrettyDescriptorForAccessMode(access_mode),
+                                  callsite_type->PrettyDescriptor());
     return false;
   }
 }
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 6a9bf61..2b57824 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -33,7 +33,7 @@
 14 (class java.lang.Short)
 [java.lang.String(int,int,char[]), public java.lang.String(), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder)]
 [private final int java.lang.String.count, private int java.lang.String.hash, private static final java.io.ObjectStreamField[] java.lang.String.serialPersistentFields, private static final long java.lang.String.serialVersionUID, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
-[native void java.lang.String.getCharsNoCheck(int,int,char[],int), private boolean java.lang.String.nonSyncContentEquals(java.lang.AbstractStringBuilder), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native java.lang.String java.lang.String.doReplace(char,char), private native java.lang.String java.lang.String.fastSubstring(int,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String 
java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.CharSequence[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.Iterable), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int), void java.lang.String.getChars(char[],int)]
+[native void java.lang.String.getCharsNoCheck(int,int,char[],int), private boolean java.lang.String.nonSyncContentEquals(java.lang.AbstractStringBuilder), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native java.lang.String java.lang.String.doReplace(char,char), private native java.lang.String java.lang.String.fastSubstring(int,int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), private static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public 
native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.CharSequence[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.Iterable), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(char[],int,int,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(char[],int,int,java.lang.String,int), void java.lang.String.getChars(char[],int)]
 []
 [interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
 0
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 49db0c8..a4d0d0c 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -111,8 +111,6 @@
     jint,
     jboolean) {
 #if __linux__
-  // TODO: What to do on Valgrind?
-
   std::unique_ptr<Backtrace> bt(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, GetTid()));
   if (!bt->Unwind(0, nullptr)) {
     printf("Cannot unwind in process.\n");
@@ -188,7 +186,6 @@
     jboolean,
     jint pid_int) {
 #if __linux__
-  // TODO: What to do on Valgrind?
   pid_t pid = static_cast<pid_t>(pid_int);
 
   // OK, this is painful. debuggerd uses ptrace to unwind other processes.
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index ebde3bf..93c1538 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -1137,6 +1137,126 @@
 
   static Object[] sArray;
 
+  /// CHECK-START: int Main.testLocalArrayMerge1(boolean) load_store_elimination (before)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const0>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge1(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG:                 Return [<<Const1>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge1(boolean) load_store_elimination (after)
+  /// CHECK-NOT:                 NewArray
+  /// CHECK-NOT:                 ArraySet
+  /// CHECK-NOT:                 ArrayGet
+  private static int testLocalArrayMerge1(boolean x) {
+    // The explicit store can be removed right away
+    // since it is equivalent to the default.
+    int[] a = { 0 };
+    // The diamond pattern stores/load can be replaced
+    // by the direct value.
+    if (x) {
+      a[0] = 1;
+    } else {
+      a[0] = 1;
+    }
+    return a[0];
+  }
+
+  /// CHECK-START: int Main.testLocalArrayMerge2(boolean) load_store_elimination (before)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+  /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const2>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const3>>]
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge2(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge2(boolean) load_store_elimination (after)
+  /// CHECK-DAG:                 ArraySet
+  /// CHECK-DAG:                 ArraySet
+  /// CHECK-NOT:                 ArraySet
+  private static int testLocalArrayMerge2(boolean x) {
+    // The explicit store can be removed eventually even
+    // though it is not equivalent to the default.
+    int[] a = { 1 };
+    // The diamond pattern stores/load remain.
+    if (x) {
+      a[0] = 2;
+    } else {
+      a[0] = 3;
+    }
+    return a[0];
+  }
+
+  /// CHECK-START: int Main.testLocalArrayMerge3(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const2>>]
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  private static int testLocalArrayMerge3(boolean x) {
+    // All stores/load remain.
+    int[] a = { 1 };
+    if (x) {
+      a[0] = 2;
+    }
+    return a[0];
+  }
+
+  /// CHECK-START: int Main.testLocalArrayMerge4(boolean) load_store_elimination (before)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const0>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG: <<Get2:a\d+>>   ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get1>>,<<Get2>>]
+  /// CHECK-DAG:                 Return [<<Add>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge4(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<Cnv1:b\d+>>   TypeConversion [<<Const1>>]
+  /// CHECK-DAG: <<Cnv2:a\d+>>   TypeConversion [<<Const1>>]
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Cnv1>>,<<Cnv2>>]
+  /// CHECK-DAG:                 Return [<<Add>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge4(boolean) load_store_elimination (after)
+  /// CHECK-NOT:                 NewArray
+  /// CHECK-NOT:                 ArraySet
+  /// CHECK-NOT:                 ArrayGet
+  private static int testLocalArrayMerge4(boolean x) {
+    byte[] a = { 0 };
+    if (x) {
+      a[0] = 1;
+    } else {
+      a[0] = 1;
+    }
+    // Differently typed (signed vs unsigned),
+    // but same reference.
+    return a[0] + (a[0] & 0xff);
+  }
+
   static void assertIntEquals(int result, int expected) {
     if (expected != result) {
       throw new Error("Expected: " + expected + ", found: " + result);
@@ -1271,6 +1391,15 @@
     assertIntEquals(testclass2.i, 55);
 
     assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
+
+    assertIntEquals(testLocalArrayMerge1(true), 1);
+    assertIntEquals(testLocalArrayMerge1(false), 1);
+    assertIntEquals(testLocalArrayMerge2(true), 2);
+    assertIntEquals(testLocalArrayMerge2(false), 3);
+    assertIntEquals(testLocalArrayMerge3(true), 2);
+    assertIntEquals(testLocalArrayMerge3(false), 1);
+    assertIntEquals(testLocalArrayMerge4(true), 2);
+    assertIntEquals(testLocalArrayMerge4(false), 2);
   }
 
   static boolean sFlag;
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index e2b8aa0..7c1507f 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -48,9 +48,8 @@
     }
   }
 
-  CodeInfo info = header->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = info.ExtractEncoding();
-  CHECK(info.HasInlineInfo(encoding));
+  CodeInfo info(header);
+  CHECK(info.HasInlineInfo());
 }
 
 static void allocate_profiling_info(jclass cls, const char* method_name) {
diff --git a/test/677-fsi2/expected.txt b/test/677-fsi2/expected.txt
new file mode 100644
index 0000000..de00847
--- /dev/null
+++ b/test/677-fsi2/expected.txt
@@ -0,0 +1,4 @@
+Run default
+Hello World
+Run without dex2oat
+Hello World
diff --git a/test/677-fsi2/info.txt b/test/677-fsi2/info.txt
new file mode 100644
index 0000000..ed0a0f2
--- /dev/null
+++ b/test/677-fsi2/info.txt
@@ -0,0 +1 @@
+Test that -Xonly-use-system-oat-files works.
diff --git a/test/677-fsi2/run b/test/677-fsi2/run
new file mode 100644
index 0000000..039a6a7
--- /dev/null
+++ b/test/677-fsi2/run
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "Run default"
+${RUN} "$@" --runtime-option -Xonly-use-system-oat-files
+return_status1=$?
+
+echo "Run without dex2oat"
+${RUN} "$@" --no-dex2oat --runtime-option -Xonly-use-system-oat-files
+return_status2=$?
+
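+# Fail overall if either run failed: replay each saved exit status in turn.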
+(exit $return_status1) && (exit $return_status2)
diff --git a/test/677-fsi2/src/Main.java b/test/677-fsi2/src/Main.java
new file mode 100644
index 0000000..834075f
--- /dev/null
+++ b/test/677-fsi2/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    System.out.println("Hello World");
+  }
+}
diff --git a/test/712-varhandle-invocations/src/SampleValues.java b/test/712-varhandle-invocations/src/SampleValues.java
new file mode 100644
index 0000000..79f4f19
--- /dev/null
+++ b/test/712-varhandle-invocations/src/SampleValues.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Sample values for use in VarHandle tests. These are cached here to avoid repeated boxing,
+ * which makes gcstress tests run slowly. (See the usage sketch after this file's diff.) */
+public class SampleValues {
+    public static final boolean[] PRIMITIVE_BOOLEANS = new boolean[] {true, false};
+
+    public static final Boolean[] BOOLEANS = new Boolean[] {true, false};
+
+    public static final byte[] PRIMITIVE_BYTES =
+            new byte[] {(byte) -128, (byte) -61, (byte) 7, (byte) 127, (byte) 33};
+
+    public static final Byte[] BYTES =
+            new Byte[] {(byte) -128, (byte) -61, (byte) 7, (byte) 127, (byte) 33};
+
+    public static final short[] PRIMITIVE_SHORTS =
+            new short[] {(short) -32768, (short) -384, (short) 32767, (short) 0xaa55};
+
+    public static final Short[] SHORTS =
+            new Short[] {(short) -32768, (short) -384, (short) 32767, (short) 0xaa55};
+
+    public static final char[] PRIMITIVE_CHARS =
+            new char[] {'A', '#', '$', 'Z', 't', 'c'};
+
+    public static final Character[] CHARACTERS =
+            new Character[] {'A', '#', '$', 'Z', 't', 'c'};
+
+    public static final int[] PRIMITIVE_INTS =
+            new int[] {-0x01234567, 0x7f6e5d4c, 0x12345678, 0x10215220, 42};
+
+    public static final Integer[] INTEGERS =
+            new Integer[] {-0x01234567, 0x7f6e5d4c, 0x12345678, 0x10215220, 42};
+
+    public static final long[] PRIMITIVE_LONGS =
+            new long[] {-0x0123456789abcdefL, 0x789abcdef0123456L, 0xfedcba9876543210L};
+
+    public static final Long[] LONGS =
+            new Long[] {-0x0123456789abcdefL, 0x789abcdef0123456L, 0xfedcba9876543210L};
+
+    public static final float[] PRIMITIVE_FLOATS =
+            new float[] {-7.77e23f, 1.234e-17f, 3.40e36f, -8.888e3f, 4.442e11f};
+
+    public static final Float[] FLOATS =
+            new Float[] {-7.77e23f, 1.234e-17f, 3.40e36f, -8.888e3f, 4.442e11f};
+
+    public static final double[] PRIMITIVE_DOUBLES =
+            new double[] {-1.0e-200, 1.11e200, 3.141, 1.1111, 6.022e23, 6.626e-34};
+
+    public static final Double[] DOUBLES =
+            new Double[] {-1.0e-200, 1.11e200, 3.141, 1.1111, 6.022e23, 6.626e-34};
+
+    public static boolean get_boolean(int index) {
+        return PRIMITIVE_BOOLEANS[index];
+    }
+
+    public static Boolean get_Boolean(int index) {
+        return BOOLEANS[index];
+    }
+
+    public static byte get_byte(int index) {
+        return PRIMITIVE_BYTES[index];
+    }
+
+    public static Byte get_Byte(int index) {
+        return BYTES[index];
+    }
+
+    public static short get_short(int index) {
+        return PRIMITIVE_SHORTS[index];
+    }
+
+    public static Short get_Short(int index) {
+        return SHORTS[index];
+    }
+
+    public static char get_char(int index) {
+        return PRIMITIVE_CHARS[index];
+    }
+
+    public static Character get_Character(int index) {
+        return CHARACTERS[index];
+    }
+
+    public static int get_int(int index) {
+        return PRIMITIVE_INTS[index];
+    }
+
+    public static Integer get_Integer(int index) {
+        return INTEGERS[index];
+    }
+
+    public static long get_long(int index) {
+        return PRIMITIVE_LONGS[index];
+    }
+
+    public static Long get_Long(int index) {
+        return LONGS[index];
+    }
+
+    public static float get_float(int index) {
+        return PRIMITIVE_FLOATS[index];
+    }
+
+    public static Float get_Float(int index) {
+        return FLOATS[index];
+    }
+
+    public static double get_double(int index) {
+        return PRIMITIVE_DOUBLES[index];
+    }
+
+    public static Double get_Double(int index) {
+        return DOUBLES[index];
+    }
+}
+
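Why caching matters here: under gcstress every allocation may trigger a collection, and autoboxing any int outside the small Integer.valueOf cache ([-128, 127]) allocates a new object per call. A rough sketch of the cost difference, with a hypothetical consume() sink:

    public class BoxingCostSketch {
      public static void main(String[] args) {
        for (int i = 0; i < 1_000; ++i) {
          // Autoboxes on every iteration: 0x12345678 is far outside the cache.
          consume(Integer.valueOf(0x12345678));
        }
        for (int i = 0; i < 1_000; ++i) {
          // Reuses the one pre-boxed instance (SampleValues.INTEGERS[2] above).
          consume(SampleValues.get_Integer(2));
        }
      }
      private static void consume(Integer value) { /* no-op sink */ }
    }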
diff --git a/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java b/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java
index bc64c0c..5a69b54 100644
--- a/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java
+++ b/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java
@@ -19,30 +19,52 @@
 // Results collector for VarHandle Unit tests
 public final class VarHandleUnitTestCollector {
     private final PrintStream out = System.out;
+    private final boolean verbose = false;
 
     private int numberOfSuccesses;
     private int numberOfSkips;
     private int numberOfFailures;
+    private int consecutiveResults = 0;
+    private String current;
+    private long startMillis;
 
     public void start(String testName) {
-        out.print(testName);
-        out.print("...");
+        out.append(testName)
+                .append("...");
+        consecutiveResults = 0;
+        current = testName;
+        startMillis = System.currentTimeMillis();
+    }
+
+    private void printStatus(String status) {
+        out.print(status);
+        if (verbose) {
+            out.print('[');
+            out.print(System.currentTimeMillis() - startMillis);
+            out.print(']');
+        }
+        out.println();
     }
 
     public void skip() {
         numberOfSkips += 1;
-        out.println("SKIP");
+        printStatus("SKIP");
+        consecutiveResults++;
     }
 
     public void success() {
         numberOfSuccesses += 1;
-        out.println("OK");
+        printStatus("OK");
+        if (consecutiveResults++ > 1) {
+            throw new AssertionError("Too many results since last start(): " + consecutiveResults);
+        }
     }
 
     public void fail(String errorMessage) {
         numberOfFailures += 1;
-        out.println("FAIL");
+        printStatus("FAIL");
         out.print(errorMessage);
+        consecutiveResults++;
     }
 
     public void printSummary() {
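For context, a sketch of how a generated test presumably drives this collector (the test name and call sequence are illustrative; only the method names come from this file):

    public class CollectorUsageSketch {
      public static void main(String[] args) {
        VarHandleUnitTestCollector collector = new VarHandleUnitTestCollector();
        collector.start("SampleTest");  // prints "SampleTest..." and records startMillis
        collector.success();            // prints "OK", plus elapsed millis when verbose is true
        collector.printSummary();
      }
    }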
diff --git a/test/712-varhandle-invocations/util-src/generate_java.py b/test/712-varhandle-invocations/util-src/generate_java.py
index 9520b53..f535b40 100644
--- a/test/712-varhandle-invocations/util-src/generate_java.py
+++ b/test/712-varhandle-invocations/util-src/generate_java.py
@@ -757,7 +757,9 @@
 """)
     with io.StringIO() as body_text:
         compatible_types = types_that_widen_to(var_type)
-        for value_type in VALUE_TYPES:
+        incompatible_types = { RANDOM.choice(list(VALUE_TYPES - compatible_types)) }
+        test_types = compatible_types | incompatible_types
+        for value_type in test_types:
             print("try {", file=body_text)
             return_type = accessor.get_return_type(var_type)
             if return_type:
@@ -765,7 +767,7 @@
             print("vh.{0}(this".format(accessor.method_name), end="", file=body_text)
             num_args = accessor.get_number_of_var_type_arguments()
             for i in range(0, num_args):
-                print(", {0}({1})".format(value_type.boxing_method(), value_type.examples[i]), end="", file=body_text)
+                print(", SampleValues.get_{0}({1})".format(value_type.boxed_type, i), end="", file=body_text)
             print(");", file=body_text)
             if value_type in compatible_types:
                 print("   assertTrue(vh.isAccessModeSupported(VarHandle.AccessMode.{0}));".format(accessor.access_mode),
@@ -817,7 +819,9 @@
     with io.StringIO() as body_text:
         return_type = accessor.get_return_type(var_type)
         compatible_types = { return_type }
-        for value_type in VALUE_TYPES:
+        incompatible_types = { RANDOM.choice(list(VALUE_TYPES - compatible_types)) }
+        test_types = compatible_types | incompatible_types
+        for value_type in test_types:
             print("try {", file=body_text)
             print("{0} result = ({0}) ".format(value_type.boxed_type), end="", file=body_text)
             print("vh.{0}(this".format(accessor.method_name), end="", file=body_text)
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 31a0eef..493582f 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -651,12 +651,6 @@
         "description": ["Requires zip, which isn't available on device"]
     },
     {
-        "tests": "712-varhandle-invocations",
-        "variant": "speed-profile & debug & gcstress & target",
-        "bug": "b/73275005",
-        "description": ["Time out"]
-    },
-    {
         "tests": ["1941-dispose-stress", "522-checker-regression-monitor-exit"],
         "variant": "jvm",
         "bug": "b/73888836",
@@ -956,6 +950,7 @@
     {
         "tests": ["616-cha-unloading",
                   "674-hiddenapi",
+                  "677-fsi2",
                   "678-quickening",
                   "679-locks",
                   "999-redefine-hiddenapi"],
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index e0757ab..faa4d91 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -266,14 +266,16 @@
         }
     },
     'art-gtest-valgrind32': {
-      # Disabled: x86 valgrind does not understand SSE4.x
+      # Disabled: Valgrind is no longer supported.
+      # Historical note: This was already disabled, as x86 valgrind did not understand SSE4.x
       # 'make' : 'valgrind-test-art-host32',
         'env': {
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
     'art-gtest-valgrind64': {
-        'make' : 'valgrind-test-art-host64',
+      # Disabled: Valgrind is no longer supported.
+      # 'make' : 'valgrind-test-art-host64',
         'env': {
             'ART_USE_READ_BARRIER' : 'false'
         }
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
deleted file mode 100644
index a97d03c..0000000
--- a/test/valgrind-suppressions.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-{
-   b/27596582
-   Memcheck:Cond
-   fun:index
-   fun:expand_dynamic_string_token
-   fun:_dl_map_object
-   fun:map_doit
-   fun:_dl_catch_error
-   fun:do_preload
-   fun:dl_main
-   fun:_dl_sysdep_start
-   fun:_dl_start_final
-   fun:_dl_start
-   obj:/lib/x86_64-linux-gnu/ld-2.19.so
-}
-
-{
-   b/31275764
-   Memcheck:Leak
-   match-leak-kinds: definite
-   fun:malloc
-   ...
-   fun:_ZN3art7Runtime17InitNativeMethodsEv
-}
-
-# SigQuit runs libbacktrace
-{
-   BackTraceReading64
-   Memcheck:Addr8
-   fun:access_mem_unrestricted
-   fun:_Uelf64_memory_read
-   fun:_Uelf64_valid_object_memory
-   fun:map_create_list
-   fun:unw_map_local_create
-   fun:_ZN14UnwindMapLocal5BuildEv
-   fun:_ZN12BacktraceMap6CreateEib
-}
-{
-   BackTraceReading32
-   Memcheck:Addr4
-   fun:access_mem_unrestricted
-   fun:_Uelf32_memory_read
-   fun:_Uelf32_valid_object_memory
-   fun:map_create_list
-   fun:unw_map_local_create
-   fun:_ZN14UnwindMapLocal5BuildEv
-   fun:_ZN12BacktraceMap6CreateEib
-}
-{
-   BackTraceReading64
-   Memcheck:Addr8
-   fun:access_mem_unrestricted
-   fun:_Uelf64_memory_read
-   fun:_Uelf64_get_load_base
-   fun:map_create_list
-   fun:unw_map_local_create
-   fun:_ZN14UnwindMapLocal5BuildEv
-   fun:_ZN12BacktraceMap6CreateEib
-}
-{
-   BackTraceReading32
-   Memcheck:Addr4
-   fun:access_mem_unrestricted
-   fun:_Uelf32_memory_read
-   fun:_Uelf32_get_load_base
-   fun:map_create_list
-   fun:unw_map_local_create
-   fun:_ZN14UnwindMapLocal5BuildEv
-   fun:_ZN12BacktraceMap6CreateEib
-}
-
-{
-   process_vm_readv
-   Memcheck:Param
-   process_vm_readv(lvec[...])
-   fun:process_vm_readv
-}
-
-# Suppressions for IsAddressMapped check in MemMapTest
-{
-   MemMapTest_IsAddressMapped
-   Memcheck:Param
-   msync(start)
-   ...
-   fun:_ZN3art10MemMapTest15IsAddressMappedEPv
-   ...
-}
diff --git a/test/valgrind-target-suppressions.txt b/test/valgrind-target-suppressions.txt
deleted file mode 100644
index 0d63a1c..0000000
--- a/test/valgrind-target-suppressions.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-# Valgrind does not recognize the ashmen ioctl() calls on ARM64, so it assumes that a size
-# parameter is a pointer.
-{
-   ashmem ioctl
-   Memcheck:Param
-   ioctl(generic)
-   ...
-   fun:ioctl
-   fun:ashmem_create_region
-}
-
-# It seems that on ARM64 Valgrind considers the canary value used by the Clang stack protector to
-# be an uninitialized value.
-{
-   jemalloc chunk_alloc_cache
-   Memcheck:Cond
-   fun:je_chunk_alloc_cache
-}
-
-# The VectorImpl class does not hold a pointer to the allocated SharedBuffer structure, but to the
-# beginning of the data, which is effectively an interior pointer. Valgrind has limitations when
-# dealing with interior pointers.
-{
-   VectorImpl
-   Memcheck:Leak
-   match-leak-kinds:possible
-   fun:malloc
-   # The wildcards make this rule work both for 32-bit and 64-bit environments.
-   fun:_ZN7android12SharedBuffer5allocE?
-   fun:_ZN7android10VectorImpl5_growE??
-}
-
-# Clang/LLVM uses memcpy for *x = *y, even though x == y (which is undefined behavior). Ignore.
-# b/29279679, https://llvm.org/bugs/show_bug.cgi?id=11763
-{
-   MemCpySelfAssign
-   Memcheck:Overlap
-   fun:memcpy
-   ...
-   fun:je_malloc_tsd_boot0
-}
-
-# Setenv is known-leaking when overwriting mappings. This is triggered by re-initializing
-# ANDROID_DATA. Ignore all setenv leaks.
-{
-   SetenvAndroidDataReinit
-   Memcheck:Leak
-   match-leak-kinds: definite
-   fun:malloc
-   fun:setenv
-}
-
-{
-   b/31275764
-   Memcheck:Leak
-   match-leak-kinds: definite
-   fun:malloc
-   ...
-   fun:_ZN3art7Runtime17InitNativeMethodsEv
-}
-
-# art::MemMap::MapInternal() uses msync() to check for the existence of memory mappings.
-{
-  art::MemMap::MapInternal()
-  Memcheck:Param
-  msync(start)
-  fun:msync
-  fun:_ZN3art6MemMap11MapInternalEPvmiiilb
-}
-
-{
-   process_vm_readv
-   Memcheck:Param
-   process_vm_readv(lvec[...])
-   fun:process_vm_readv
-}
diff --git a/tools/art b/tools/art
index 1c603d4..781ee2f 100644
--- a/tools/art
+++ b/tools/art
@@ -77,7 +77,6 @@
 Supported OPTIONS include:
   --32                     Use the 32-bit Android Runtime.
   --64                     Use the 64-bit Android Runtime.
-  --callgrind              Launch the Android Runtime in callgrind.
   -d                       Use the debug ART library (libartd.so).
   --debug                  Equivalent to -d.
   --gdb                    Launch the Android Runtime in gdb.
@@ -269,9 +268,6 @@
   --64)
     ART_BINARY=dalvikvm64
     ;;
-  --callgrind)
-    LAUNCH_WRAPPER="valgrind --tool=callgrind"
-    ;;
   -d)
     ;& # Fallthrough
   --debug)
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
index 08d5885..977d1ca 100644
--- a/tools/cpp-define-generator/constant_lockword.def
+++ b/tools/cpp-define-generator/constant_lockword.def
@@ -23,23 +23,29 @@
 #define DEFINE_LOCK_WORD_EXPR(macro_name, type, constant_field_name) \
   DEFINE_EXPR(LOCK_WORD_ ## macro_name, type, art::LockWord::constant_field_name)
 
+// FIXME: The naming is inconsistent: constants named `...Shifted` do not always get a `_SHIFTED` suffix here.
 DEFINE_LOCK_WORD_EXPR(STATE_SHIFT,               int32_t,  kStateShift)
-DEFINE_LOCK_WORD_EXPR(STATE_MASK,                uint32_t, kStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(STATE_MASK_SHIFTED,        uint32_t, kStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_SHIFT,  int32_t,  kReadBarrierStateShift)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK,   uint32_t,  kReadBarrierStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK,   uint32_t, kReadBarrierStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE,       int32_t,  kThinLockCountOne)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SIZE,      int32_t,  kThinLockCountSize)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SHIFT,     int32_t,  kThinLockCountShift)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_MASK_SHIFTED, uint32_t, kThinLockCountMaskShifted)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE,       uint32_t, kThinLockCountOne)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_OWNER_MASK_SHIFTED, uint32_t, kThinLockOwnerMaskShifted)
 
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS, uint32_t, kStateForwardingAddress)
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS,  uint32_t, kStateForwardingAddress)
 DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_OVERFLOW, uint32_t, kStateForwardingAddressOverflow)
 DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_SHIFT, uint32_t, kForwardingAddressShift)
 
-DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED,   uint32_t,  kGCStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED,     uint32_t, kGCStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT,   int32_t,  kGCStateShift)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_SIZE,             int32_t,  kGCStateSize)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT,            int32_t,  kGCStateShift)
 
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT, int32_t, kMarkBitStateShift)
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED, uint32_t, kMarkBitStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT,            int32_t,  kMarkBitStateShift)
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED,     uint32_t, kMarkBitStateMaskShifted)
 
 #undef DEFINE_LOCK_WORD_EXPR
 
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index d376cad..eebc092 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -360,7 +360,9 @@
 if [[ $mode == "host" ]]; then
   pkill -9 -f /bin/dalvikvm
 else
-  adb shell pkill -9 -f /bin/dalvikvm
+  # Tests may run on older Android versions where pkill requires "-l SIGNAL"
+  # rather than "-SIGNAL".
+  adb shell pkill -l 9 -f /bin/dalvikvm
 fi
 echo "Done."
 
diff --git a/tools/tracefast-plugin/Android.bp b/tools/tracefast-plugin/Android.bp
new file mode 100644
index 0000000..1d7dd30
--- /dev/null
+++ b/tools/tracefast-plugin/Android.bp
@@ -0,0 +1,108 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+    name: "tracefast-defaults",
+    host_supported: true,
+    srcs: ["tracefast.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it must be
+    // the same ISA as the runtime it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    target: {
+        android: {
+            shared_libs: [
+                "libcutils",
+            ],
+        },
+        darwin: {
+            enabled: false,
+        },
+    },
+    header_libs: [
+        "libnativehelper_header_only",
+    ],
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+    symlink_preferred_arch: true,
+}
+
+cc_defaults {
+    name: "tracefast-interpreter-defaults",
+    defaults: ["tracefast-defaults"],
+    cflags: ["-DTRACEFAST_INTERPRETER=1"],
+}
+
+cc_defaults {
+    name: "tracefast-trampoline-defaults",
+    defaults: ["tracefast-defaults"],
+    cflags: ["-DTRACEFAST_TRAMPOLINE=1"],
+}
+
+art_cc_library {
+    name: "libtracefast-interpreter",
+    defaults: ["tracefast-interpreter-defaults"],
+    shared_libs: [
+        "libart",
+        "libartbase",
+    ],
+}
+
+art_cc_library {
+    name: "libtracefast-interpreterd",
+    defaults: [
+        "art_debug_defaults",
+        "tracefast-interpreter-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libartbased",
+    ],
+}
+
+art_cc_library {
+    name: "libtracefast-trampoline",
+    defaults: ["tracefast-trampoline-defaults"],
+    shared_libs: [
+        "libart",
+        "libartbase",
+    ],
+}
+
+art_cc_library {
+    name: "libtracefast-trampolined",
+    defaults: [
+        "art_debug_defaults",
+        "tracefast-trampoline-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libartbased",
+    ],
+}
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
new file mode 100644
index 0000000..ed6ac3d
--- /dev/null
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/scoped_gc_critical_section.h"
+#include "instrumentation.h"
+#include "runtime.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace tracefast {
+
+#if ((!defined(TRACEFAST_INTERPRETER) && !defined(TRACEFAST_TRAMPOLINE)) || \
+     (defined(TRACEFAST_INTERPRETER) && defined(TRACEFAST_TRAMPOLINE)))
+#error Must set exactly one of TRACEFAST_TRAMPOLINE or TRACEFAST_INTERPRETER during build
+#endif
+
+
+#ifdef TRACEFAST_INTERPRETER
+static constexpr const char* kTracerInstrumentationKey = "tracefast_INTERPRETER";
+static constexpr bool kNeedsInterpreter = true;
+#else  // defined(TRACEFAST_TRAMPOLINE)
+static constexpr const char* kTracerInstrumentationKey = "tracefast_TRAMPOLINE";
+static constexpr bool kNeedsInterpreter = false;
+#endif  // TRACEFAST_INTERPRETER
+
+class Tracer FINAL : public art::instrumentation::InstrumentationListener {
+ public:
+  Tracer() {}
+
+  void MethodEntered(art::Thread* thread ATTRIBUTE_UNUSED,
+                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                     art::ArtMethod* method ATTRIBUTE_UNUSED,
+                     uint32_t dex_pc ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    const art::JValue& return_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void DexPcMoved(art::Thread* thread ATTRIBUTE_UNUSED,
+                  art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                  art::ArtMethod* method ATTRIBUTE_UNUSED,
+                  uint32_t new_dex_pc ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void FieldRead(art::Thread* thread ATTRIBUTE_UNUSED,
+                 art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                 art::ArtMethod* method ATTRIBUTE_UNUSED,
+                 uint32_t dex_pc ATTRIBUTE_UNUSED,
+                 art::ArtField* field ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    art::ArtField* field ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> field_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    art::ArtField* field ATTRIBUTE_UNUSED,
+                    const art::JValue& field_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void ExceptionThrown(art::Thread* thread ATTRIBUTE_UNUSED,
+                       art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void ExceptionHandled(art::Thread* self ATTRIBUTE_UNUSED,
+                        art::Handle<art::mirror::Throwable> throwable ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void Branch(art::Thread* thread ATTRIBUTE_UNUSED,
+              art::ArtMethod* method ATTRIBUTE_UNUSED,
+              uint32_t dex_pc ATTRIBUTE_UNUSED,
+              int32_t dex_pc_offset ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
+                                art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                                art::ArtMethod* caller ATTRIBUTE_UNUSED,
+                                uint32_t dex_pc ATTRIBUTE_UNUSED,
+                                art::ArtMethod* callee ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
+                       const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Tracer);
+};
+
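+// A single global listener whose callbacks are all empty; enabling it exercises
+// only the instrumentation dispatch paths rather than any actual tracing work.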
+Tracer gEmptyTracer;
+
+static void StartTracing() REQUIRES(!art::Locks::mutator_lock_,
+                                    !art::Locks::thread_list_lock_,
+                                    !art::Locks::thread_suspend_count_lock_) {
+  art::Thread* self = art::Thread::Current();
+  art::Runtime* runtime = art::Runtime::Current();
+  art::gc::ScopedGCCriticalSection gcs(self,
+                                       art::gc::kGcCauseInstrumentation,
+                                       art::gc::kCollectorTypeInstrumentation);
+  art::ScopedSuspendAll ssa("starting fast tracing");
+  runtime->GetInstrumentation()->AddListener(&gEmptyTracer,
+                                             art::instrumentation::Instrumentation::kMethodEntered |
+                                             art::instrumentation::Instrumentation::kMethodExited |
+                                             art::instrumentation::Instrumentation::kMethodUnwind);
+  runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey, kNeedsInterpreter);
+}
+
+class TraceFastPhaseCB : public art::RuntimePhaseCallback {
+ public:
+  TraceFastPhaseCB() {}
+
+  void NextRuntimePhase(art::RuntimePhaseCallback::RuntimePhase phase)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (phase == art::RuntimePhaseCallback::RuntimePhase::kInit) {
+      art::ScopedThreadSuspension sts(art::Thread::Current(),
+                                      art::ThreadState::kWaitingForMethodTracingStart);
+      StartTracing();
+    }
+  }
+};
+TraceFastPhaseCB gPhaseCallback;
+
+// The plugin initialization function.
+extern "C" bool ArtPlugin_Initialize() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  art::Runtime* runtime = art::Runtime::Current();
+  art::ScopedThreadSuspension stsc(art::Thread::Current(),
+                                   art::ThreadState::kWaitingForMethodTracingStart);
+  art::ScopedSuspendAll ssa("Add phase callback");
+  runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gPhaseCallback);
+  return true;
+}
+
+extern "C" bool ArtPlugin_Deinitialize() {
+  // Nothing to tear down.
+  return true;
+}
+
+}  // namespace tracefast
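Usage note: a library like this is presumably loaded at runtime startup via ART's plugin mechanism, e.g. something like -Xplugin:libtracefast-interpreter.so (the exact flag spelling and library name here are assumptions, not taken from this change); once the runtime reaches the kInit phase, the callback above starts method tracing automatically.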